shithub: libvpx

ref: a68356202d3f4c75194b5ae0efc7facb078cefea
parent: d10fc5af8fb01af12dd83a3f48a0f0ef4fd3810f
author: Jingning Han <[email protected]>
date: Mon Aug 3 05:54:13 EDT 2015

Remove vpx_ prefix from the dspr2 file name in vpx_dsp/mips

Make the file naming consistent with the other optimized implementations in vpx_dsp.

Change-Id: I28f0d05ff7c5bf2b815989b3f1bd6c6b25608677

--- /dev/null
+++ b/vpx_dsp/mips/convolve2_avg_dspr2.c
@@ -1,0 +1,279 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
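+/*
+ * 2-tap ("bi") vertical convolve-and-average.  A scalar sketch of what each
+ * asm block below computes per output byte (FILTER_BITS == 7; the +64
+ * rounding bias is seeded into the accumulators with mtlo):
+ *
+ *   sum    = filter_y[3] * src[x] + filter_y[4] * src[x + src_stride];
+ *   val    = clip_pixel((sum + 64) >> 7);           -- lbux via cm crop table
+ *   dst[x] = ROUND_POWER_OF_TWO(dst[x] + val, 1);   -- addqh_r.w
+ */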
+static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
+                                         int32_t src_stride,
+                                         uint8_t *dst,
+                                         int32_t dst_stride,
+                                         const int16_t *filter_y,
+                                         int32_t w,
+                                         int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
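+  /* Pack the two taps, filter_y[3] and filter_y[4], into one 32-bit word so
+     dpa.w.ph can apply both in a single multiply-accumulate. */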
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < w; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
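+      /* Each asm block produces four averaged output bytes, one per DSP
+         accumulator (ac0..ac3). */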
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm),
+            [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
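+/* Same computation as the loop above, hard-wired for w == 64 with an extra
+   prefetch covering the second half of the destination row. */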
+static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_y,
+                                          int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+    prefetch_store(dst + dst_stride + 32);
+
+    for (x = 0; x < 64; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm),
+            [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y, int y_step_q4,
+                                  int w, int h) {
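+  /* The DSPr2 fast path only covers the unscaled case: y_step_q4 == 16 is a
+     step of exactly 1.0 in Q4.  Anything else falls back to the C version. */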
+  if (16 == y_step_q4) {
+    uint32_t pos = 38;
+
+    /* bit position for extract from acc: pos = 38 makes extp with size 31
+       return accumulator bits 38..7, i.e. the >> FILTER_BITS (7) shift */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+      case 8:
+      case 16:
+      case 32:
+        convolve_bi_avg_vert_4_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_y, w, h);
+        break;
+      case 64:
+        prefetch_store(dst + 32);
+        convolve_bi_avg_vert_64_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_y, h);
+        break;
+      default:
+        vpx_convolve8_avg_vert_c(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_x, x_step_q4,
+                                 filter_y, y_step_q4,
+                                 w, h);
+        break;
+    }
+  } else {
+    vpx_convolve8_avg_vert_c(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c
@@ -1,0 +1,831 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
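+/*
+ * 2-tap ("bi") horizontal convolve-and-average.  A scalar sketch of what the
+ * assembly below computes per output byte (FILTER_BITS == 7):
+ *
+ *   sum    = filter_x0[3] * src[x] + filter_x0[4] * src[x + 1];
+ *   val    = clip_pixel((sum + 64) >> 7);           -- lbux via cm crop table
+ *   dst[x] = ROUND_POWER_OF_TWO(dst[x] + val, 1);   -- addqh_r.w
+ */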
+static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  int32_t  Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3;
+  uint32_t tn1, tn2;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
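+  /* Pack taps filter_x0[3] and filter_x0[4] into one word for dpa.w.ph. */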
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p3],          %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
+        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
+        "dpa.w.ph         $ac2,           %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp4],       $ac2,           31             \n\t"
+
+        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
+        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
+
+        /* clamp */
+        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
+        "lbux             %[p3],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
+        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
+
+        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
+        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
+
+        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t"  /* average odd 2 */
+        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
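+/* 8-wide variant: the even outputs are computed from the aligned loads, the
+   odd outputs from a one-byte-shifted view of the same data (balign). */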
+static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
+                                          int32_t src_stride,
+                                          uint8_t *dst,
+                                          int32_t dst_stride,
+                                          const int16_t *filter_x0,
+                                          int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3, tp4;
+  uint32_t p1, p2, p3, p4, n1;
+  uint32_t st0, st1;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
+        "ulw              %[tp3],         8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+        "lbu              %[Temp2],       0(%[dst])                      \n\t"
+        "lbu              %[tp4],         2(%[dst])                      \n\t"
+
+        /* even 2. pixel */
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac1,           31             \n\t"
+
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+        "addqh_r.w        %[tp4],         %[tp4],         %[st1]         \n\t"
+        "sb               %[Temp2],       0(%[dst])                      \n\t"
+        "sb               %[tp4],         2(%[dst])                      \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+
+        "balign           %[tp3],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "lbu              %[Temp2],       4(%[dst])                      \n\t"
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "sb               %[Temp2],       4(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp3]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp3]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tp1],         6(%[dst])                      \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],       $ac1,           31             \n\t"
+
+        "lbu              %[tp2],         1(%[dst])                      \n\t"
+        "lbu              %[tp3],         3(%[dst])                      \n\t"
+        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[filter45]    \n\t"
+        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tp4],         5(%[dst])                      \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[tp2],         1(%[dst])                      \n\t"
+        "sb               %[tp1],         6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac2,           31             \n\t"
+
+        "lbu              %[tp1],         7(%[dst])                      \n\t"
+
+        /* clamp */
+        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
+        "addqh_r.w        %[tp3],         %[tp3],         %[p4]          \n\t"
+
+        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
+        "addqh_r.w        %[tp4],         %[tp4],         %[p2]          \n\t"
+
+        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
+        "addqh_r.w        %[tp1],         %[tp1],         %[p1]          \n\t"
+
+        /* store bytes */
+        "sb               %[tp3],         3(%[dst])                      \n\t"
+        "sb               %[tp4],         5(%[dst])                      \n\t"
+        "sb               %[tp1],         7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
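+/* 16-pixel core loop; count selects how many 16-pixel blocks to emit per row
+   (1 for w == 16, 2 for w == 32 in the dispatch below). */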
+static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
+                                           int32_t src_stride,
+                                           uint8_t *dst_ptr,
+                                           int32_t dst_stride,
+                                           const int16_t *filter_x0,
+                                           int32_t h,
+                                           int32_t count) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_store(dst_ptr + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                   \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
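+/* 64-wide variant: four 16-pixel blocks per row, with additional prefetches
+   for the wider source and destination rows. */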
+static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
+                                           int32_t src_stride,
+                                           uint8_t *dst_ptr,
+                                           int32_t dst_stride,
+                                           const int16_t *filter_x0,
+                                           int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+    prefetch_store(dst_ptr + dst_stride);
+    prefetch_store(dst_ptr + dst_stride + 32);
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                   \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
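
The lbu/addqh_r.w/sb triplets in the kernel above are what make this the "avg" variant: addqh_r.w is a rounded halving add, so each clamped filter output is merged with the byte already present in dst. A minimal scalar sketch of that per-pixel step (the helper name is illustrative, not part of the patch):

    /* Rounded average of a new filter output with the existing dst byte,
       matching what addqh_r.w computes on the unpacked values. */
    static uint8_t avg_round(uint8_t filtered, uint8_t existing) {
      return (uint8_t)((filtered + existing + 1) >> 1);
    }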
+
+void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                   uint8_t *dst, ptrdiff_t dst_stride,
+                                   const int16_t *filter_x, int x_step_q4,
+                                   const int16_t *filter_y, int y_step_q4,
+                                   int w, int h) {
+  if (16 == x_step_q4) {
+    uint32_t pos = 38;
+
+    /* bit position for extract from acc */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    /* prefetch data to cache memory */
+    prefetch_load(src);
+    prefetch_load(src + 32);
+    prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+        convolve_bi_avg_horiz_4_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h);
+        break;
+      case 8:
+        convolve_bi_avg_horiz_8_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h);
+        break;
+      case 16:
+        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
+                                       dst, dst_stride,
+                                       filter_x, h, 1);
+        break;
+      case 32:
+        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
+                                       dst, dst_stride,
+                                       filter_x, h, 2);
+        break;
+      case 64:
+        prefetch_load(src + 64);
+        prefetch_store(dst + 32);
+
+        convolve_bi_avg_horiz_64_dspr2(src, src_stride,
+                                       dst, dst_stride,
+                                       filter_x, h);
+        break;
+      default:
+        vpx_convolve8_avg_horiz_c(src, src_stride,
+                                  dst, dst_stride,
+                                  filter_x, x_step_q4,
+                                  filter_y, y_step_q4,
+                                  w, h);
+        break;
+    }
+  } else {
+    vpx_convolve8_avg_horiz_c(src, src_stride,
+                              dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4,
+                              w, h);
+  }
+}
+#endif
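
The fast paths in this file all compute the same result as the scalar loop below, a sketch assuming the vpx convention that only taps 3 and 4 of the 8-tap filter_x array are nonzero for the bilinear "convolve2" case (clip_pixel, ROUND_POWER_OF_TWO and FILTER_BITS as declared in vpx_dsp_common.h and vpx_filter.h; the function name is illustrative):

    static void convolve2_avg_horiz_ref(const uint8_t *src, int src_stride,
                                        uint8_t *dst, int dst_stride,
                                        const int16_t *filter_x, int w, int h) {
      int x, y;
      for (y = 0; y < h; ++y) {
        for (x = 0; x < w; ++x) {
          int sum = src[x] * filter_x[3] + src[x + 1] * filter_x[4];
          int px = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
          dst[x] = (uint8_t)((dst[x] + px + 1) >> 1);  /* average with dst */
        }
        src += src_stride;
        dst += dst_stride;
      }
    }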
--- /dev/null
+++ b/vpx_dsp/mips/convolve2_dspr2.c
@@ -1,0 +1,782 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
+                                                 int32_t src_stride,
+                                                 uint8_t *dst,
+                                                 int32_t dst_stride,
+                                                 const int16_t *filter_x0,
+                                                 int32_t h) {
+  int32_t       y;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint8_t       *dst_ptr;
+  int32_t       Temp1, Temp2;
+  uint32_t      vector4a = 64;
+  uint32_t      tp1, tp2;
+  uint32_t      p1, p2;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    dst_ptr = dst;
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],         %[Temp2](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp2],       $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
+        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[p1],          0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[p2],          0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [dst_ptr] "+r" (dst_ptr)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += 1;
+  }
+}
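
The filter45 setup at the top of each kernel packs the two active 16-bit taps into a single 32-bit word, so one dpa.w.ph can multiply-accumulate a pixel pair against both taps at once. An equivalent formulation without the type-punned load (illustrative only; requires <string.h>):

    uint32_t filter45;
    memcpy(&filter45, &filter_x0[3], sizeof(filter45));  /* taps 3 and 4 */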
+
+static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
+                                                 int32_t src_stride,
+                                                 uint8_t *dst,
+                                                 int32_t dst_stride,
+                                                 const int16_t *filter_x0,
+                                                 int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *dst_ptr;
+  uint32_t vector4a = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3;
+  uint32_t p1, p2, p3, p4;
+  uint8_t *odd_dst;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+
+    dst_ptr = dst;
+    odd_dst = (dst_ptr + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                       \n\t"
+        "ulw              %[tp2],         4(%[src])                       \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                          \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                          \n\t"
+        "ulw              %[tp3],         8(%[src])                       \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
+        "extp             %[Temp1],       $ac3,           31              \n\t"
+
+        /* even 2. pixel */
+        "dpa.w.ph         $ac2,           %[p2],          %[filter45]     \n\t"
+        "extp             %[Temp3],       $ac2,           31              \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"
+        "mtlo             %[vector4a],    $ac1                            \n\t"
+        "mthi             $zero,          $ac1                            \n\t"
+        "balign           %[tp3],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[filter45]     \n\t"
+        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
+        "extp             %[p3],          $ac1,           31              \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
+        "extp             %[Temp3],       $ac2,           31              \n\t"
+
+        "lbux             %[Temp1],         %[p3](%[cm])                    \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a],    $ac1                            \n\t"
+        "mthi             $zero,          $ac1                            \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"
+        "sb               %[Temp1],       0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+
+        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
+        "extp             %[Temp2],       $ac3,           31              \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "dpa.w.ph         $ac1,           %[p2],          %[filter45]     \n\t"
+        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+        "extp             %[Temp3],       $ac1,           31              \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[filter45]     \n\t"
+        "extp             %[Temp2],       $ac3,           31              \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[tp3],         0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
+        "extp             %[Temp1],       $ac2,           31              \n\t"
+
+        /* clamp */
+        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"
+        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"
+        "lbux             %[p1],          %[Temp1](%[cm])                 \n\t"
+
+        /* store bytes */
+        "sb               %[p4],          0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+        "sb               %[p2],          0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+        "sb               %[p1],          0(%[odd_dst])                   \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a), [cm] "r" (cm),
+          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += 1;
+  }
+}
+
+static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
+                                                  int32_t src_stride,
+                                                  uint8_t *dst_ptr,
+                                                  int32_t dst_stride,
+                                                  const int16_t *filter_x0,
+                                                  int32_t h,
+                                                  int32_t count) {
+  int32_t       c, y;
+  const uint8_t *src;
+  uint8_t       *dst;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector_64 = 64;
+  int32_t       Temp1, Temp2, Temp3;
+  uint32_t      qload1, qload2;
+  uint32_t      p1, p2, p3, p4, p5;
+  uint32_t      st1, st2, st3;
+  uint32_t      dst_pitch_2 = (dst_stride << 1);
+  uint8_t       *odd_dst;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+
+    src = src_ptr;
+    dst = dst_ptr;
+
+    odd_dst = (dst + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],        0(%[src])                       \n\t"
+          "ulw              %[qload2],        4(%[src])                       \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "ulw              %[qload1],        8(%[src])                       \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
+          "ulw              %[qload2],        12(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        20(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],        1(%[src])                       \n\t"
+          "ulw              %[qload2],        5(%[src])                       \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        9(%[src])                       \n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        21(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm),
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+      );
+
+      src += 16;
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
+      odd_dst = (dst + dst_stride);
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += 1;
+  }
+}
+
+static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
+                                                  int32_t src_stride,
+                                                  uint8_t *dst_ptr,
+                                                  int32_t dst_stride,
+                                                  const int16_t *filter_x0,
+                                                  int32_t h) {
+  int32_t       c, y;
+  const uint8_t *src;
+  uint8_t       *dst;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector_64 = 64;
+  int32_t       Temp1, Temp2, Temp3;
+  uint32_t      qload1, qload2;
+  uint32_t      p1, p2, p3, p4, p5;
+  uint32_t      st1, st2, st3;
+  uint32_t      dst_pitch_2 = (dst_stride << 1);
+  uint8_t       *odd_dst;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+
+    src = src_ptr;
+    dst = dst_ptr;
+
+    odd_dst = (dst + dst_stride);
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],        0(%[src])                       \n\t"
+          "ulw              %[qload2],        4(%[src])                       \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "ulw              %[qload1],        8(%[src])                       \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
+          "ulw              %[qload2],        12(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        20(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],        1(%[src])                       \n\t"
+          "ulw              %[qload2],        5(%[src])                       \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        9(%[src])                       \n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        21(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm),
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+      );
+
+      src += 16;
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
+      odd_dst = (dst + dst_stride);
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += 1;
+  }
+}
+
+void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter, int w, int h) {
+  int x, y;
+
+  for (y = 0; y < h; ++y) {
+    for (x = 0; x < w; ++x) {
+      int sum = 0;
+
+      sum += src[x] * filter[3];
+      sum += src[x + 1] * filter[4];
+
+      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+
+    src += src_stride;
+    dst += 1;
+  }
+}
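
The C fallback above also documents the transposed layout precisely: output element (x, y) lands at dst[x * dst_stride + y], so input rows become output columns. A toy illustration of that addressing (hypothetical sizes, not from the patch):

    #include <stdint.h>

    static void transpose_layout_demo(void) {
      uint8_t in[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
      uint8_t out[4][2];
      int x, y;
      for (y = 0; y < 2; ++y)      /* src advances by src_stride per row */
        for (x = 0; x < 4; ++x)    /* dst[x * dst_stride], then dst += 1 */
          out[x][y] = in[y][x];
      (void)out;
    }

One plausible reading of the layout is that two transposed horizontal passes compose into a separable horizontal-plus-vertical filter; that is an inference from the addressing, not something the patch states.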
+
+void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                         uint8_t *dst, ptrdiff_t dst_stride,
+                         const int16_t *filter,
+                         int w, int h) {
+  uint32_t pos = 38;
+
+  /* bit position for extract from acc */
+  __asm__ __volatile__ (
+    "wrdsp      %[pos],     1           \n\t"
+    :
+    : [pos] "r" (pos)
+  );
+
+  /* prefetch data to cache memory */
+  prefetch_load(src);
+  prefetch_load(src + 32);
+
+  switch (w) {
+    case 4:
+      convolve_bi_horiz_4_transposed_dspr2(src, src_stride,
+                                           dst, dst_stride,
+                                           filter, h);
+      break;
+    case 8:
+      convolve_bi_horiz_8_transposed_dspr2(src, src_stride,
+                                           dst, dst_stride,
+                                           filter, h);
+      break;
+    case 16:
+    case 32:
+      convolve_bi_horiz_16_transposed_dspr2(src, src_stride,
+                                            dst, dst_stride,
+                                            filter, h,
+                                            (w/16));
+      break;
+    case 64:
+      prefetch_load(src + 32);
+      convolve_bi_horiz_64_transposed_dspr2(src, src_stride,
+                                            dst, dst_stride,
+                                            filter, h);
+      break;
+    default:
+      convolve_bi_horiz_transposed(src, src_stride,
+                                   dst, dst_stride,
+                                   filter, w, h);
+      break;
+  }
+}
+#endif
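
A note on the accumulator plumbing shared by the kernels in these files: mtlo seeds each accumulator with 64, which is 1 << (FILTER_BITS - 1), and with the DSPControl pos field set to 38 by the wrdsp above, "extp %[t], $acN, 31" extracts accumulator bits 38..7, i.e. the 32-bit result shifted right by FILTER_BITS == 7. In scalar terms (a reading of the DSP ASE semantics, not code from the patch):

    /* What mtlo(64), the dpa.w.ph accumulation, and extp(acc, 31) with
       pos == 38 compute together. */
    static int32_t round_shift(int64_t sum_of_products) {
      return (int32_t)((sum_of_products + 64) >> 7);  /* round, >> FILTER_BITS */
    }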
--- /dev/null
+++ b/vpx_dsp/mips/convolve2_horiz_dspr2.c
@@ -1,0 +1,711 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_x0,
+                                      int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  int32_t Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp4],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p1],       %[Temp2](%[cm])                \n\t"
+        "lbux             %[p2],       %[Temp4](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[tp1],      0(%[dst])                      \n\t"
+        "sb               %[p1],       1(%[dst])                      \n\t"
+        "sb               %[tp2],      2(%[dst])                      \n\t"
+        "sb               %[p2],       3(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
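
The even/odd structure above deserves a gloss: preceu.ph.qbr/qbl unpack the low and high byte pairs of a loaded word into 16-bit lanes for the even-phase pixels, while "balign %[tp2], %[tp1], 3" splices the two loads so tp2 becomes the word starting at src + 1, whose unpacking yields the odd-phase pairs. A little-endian sketch of that realignment (illustrative; assumes <stdint.h> and <string.h>):

    uint32_t w0, w1, odd;
    memcpy(&w0, src, 4);           /* bytes 0..3 */
    memcpy(&w1, src + 4, 4);       /* bytes 4..7 */
    odd = (w0 >> 8) | (w1 << 24);  /* bytes 1..4 == unaligned word at src + 1 */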
+
+static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_x0,
+                                      int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3;
+  uint32_t p1, p2, p3, p4;
+  uint32_t st0, st1;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
+        "ulw              %[tp3],      8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "dpa.w.ph         $ac1,        %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac1,           31             \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "sb               %[st0],      0(%[dst])                      \n\t"
+        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
+
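+        /* the balign pair rebuilds the unaligned words at src + 1 from the
+           loads already in registers (tp2 becomes bytes 1..4, tp3 bytes
+           5..8), so the odd output pixels reuse the same dpa.w.ph pattern
+           shifted by one byte */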
+        "balign           %[tp3],      %[tp2],         3              \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+
+        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "sb               %[st1],      2(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp3]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp3]                         \n\t"
+        "sb               %[st0],      4(%[dst])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,        %[p2],          %[filter45]    \n\t"
+        "extp             %[Temp3],    $ac1,           31             \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[filter45]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[st1],      1(%[dst])                      \n\t"
+        "sb               %[st0],      6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
+        "extp             %[Temp1],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
+        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
+        "lbux             %[p1],       %[Temp1](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[p4],       3(%[dst])                      \n\t"
+        "sb               %[p2],       5(%[dst])                      \n\t"
+        "sb               %[p1],       7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
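+
+/* For reference, each bi_horiz kernel in this file is, in effect, the scalar
+ * loop below (a sketch, not part of the build): for the bilinear case only
+ * taps 3 and 4 of the 8-tap filter are nonzero, and the +64 / >> 7 pair is
+ * the usual rounding for the 7-bit filter precision.
+ *
+ *   for (x = 0; x < w; ++x)
+ *     dst[x] = clip_pixel((src[x] * filter_x[3] +
+ *                          src[x + 1] * filter_x[4] + 64) >> 7);
+ */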
+
+static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
+                                       int32_t src_stride,
+                                       uint8_t *dst_ptr,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h,
+                                       int32_t count) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_store(dst_ptr + dst_stride);
+
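+    /* each pass of this inner loop filters 16 output pixels; callers pass
+       count = w / 16 (1 for w == 16, 2 for w == 32) */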
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
+                                       int32_t src_stride,
+                                       uint8_t *dst_ptr,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  const int16_t *filter = &filter_x0[3];
+  uint32_t filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+    prefetch_store(dst_ptr + dst_stride);
+    prefetch_store(dst_ptr + dst_stride + 32);
+
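+    /* 64-wide rows: the same 16-pixel body as convolve_bi_horiz_16_dspr2,
+       unrolled to a fixed four passes per row, with the deeper prefetch
+       above */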
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                               uint8_t *dst, ptrdiff_t dst_stride,
+                               const int16_t *filter_x, int x_step_q4,
+                               const int16_t *filter_y, int y_step_q4,
+                               int w, int h) {
+  if (16 == x_step_q4) {
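+    /* x_step_q4 is the horizontal step in 1/16-pel units, so 16 means
+       exactly one source pixel per output pixel; this is the only case the
+       DSPR2 fast paths cover, and anything else falls through to the
+       generic C convolution below */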
+    uint32_t pos = 38;
+
+    prefetch_load((const uint8_t *)filter_x);
+
+    /* bit position for extract from acc: with pos = 38, the "extp ..., 31"
+       extractions in the kernels return (acc >> 7) */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    /* prefetch data to cache memory */
+    prefetch_load(src);
+    prefetch_load(src + 32);
+    prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+        convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride,
+                                  dst, (int32_t)dst_stride,
+                                  filter_x, (int32_t)h);
+        break;
+      case 8:
+        convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride,
+                                  dst, (int32_t)dst_stride,
+                                  filter_x, (int32_t)h);
+        break;
+      case 16:
+        convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
+                                   dst, (int32_t)dst_stride,
+                                   filter_x, (int32_t)h, 1);
+        break;
+      case 32:
+        convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
+                                   dst, (int32_t)dst_stride,
+                                   filter_x, (int32_t)h, 2);
+        break;
+      case 64:
+        prefetch_load(src + 64);
+        prefetch_store(dst + 32);
+
+        convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride,
+                                   dst, (int32_t)dst_stride,
+                                   filter_x, (int32_t)h);
+        break;
+      default:
+        vpx_convolve8_horiz_c(src, src_stride,
+                              dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4,
+                              w, h);
+        break;
+    }
+  } else {
+    vpx_convolve8_horiz_c(src, src_stride,
+                          dst, dst_stride,
+                          filter_x, x_step_q4,
+                          filter_y, y_step_q4,
+                          w, h);
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve2_vert_dspr2.c
@@ -1,0 +1,264 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_bi_vert_4_dspr2(const uint8_t *src,
+                                     int32_t src_stride,
+                                     uint8_t *dst,
+                                     int32_t dst_stride,
+                                     const int16_t *filter_y,
+                                     int32_t w,
+                                     int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < w; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
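+          /* p1/p2 now pair vertically adjacent pixels as halfwords (row0 in
+             the low half, row1 in the high half), so each dpa.w.ph below
+             computes row0 * filter_y[3] + row1 * filter_y[4] for one output
+             pixel */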
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride),
+            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_bi_vert_64_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_y,
+                                      int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2;
+  uint32_t      p1, p2;
+  uint32_t      scratch1;
+  uint32_t      store1, store2;
+  int32_t       Temp1, Temp2;
+  const int16_t *filter = &filter_y[3];
+  uint32_t      filter45;
+
+  filter45 = ((const int32_t *)filter)[0];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < 64; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+
+          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
+          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
+
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [scratch1] "=&r" (scratch1),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride),
+            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                              uint8_t *dst, ptrdiff_t dst_stride,
+                              const int16_t *filter_x, int x_step_q4,
+                              const int16_t *filter_y, int y_step_q4,
+                              int w, int h) {
+  if (16 == y_step_q4) {
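+    /* as in the horizontal path, y_step_q4 == 16 (one full source row per
+       output row) is the only case handled here; other step sizes use the C
+       fallback */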
+    uint32_t pos = 38;
+
+    /* bit position for extract from acc: with pos = 38, the "extp ..., 31"
+       extractions in the kernels return (acc >> 7) */
+    __asm__ __volatile__ (
+      "wrdsp      %[pos],     1           \n\t"
+      :
+      : [pos] "r" (pos)
+    );
+
+    prefetch_store(dst);
+
+    switch (w) {
+      case 4:
+      case 8:
+      case 16:
+      case 32:
+        convolve_bi_vert_4_dspr2(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_y, w, h);
+        break;
+      case 64:
+        prefetch_store(dst + 32);
+        convolve_bi_vert_64_dspr2(src, src_stride,
+                                  dst, dst_stride,
+                                  filter_y, h);
+        break;
+      default:
+        vpx_convolve8_vert_c(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
+        break;
+    }
+  } else {
+    vpx_convolve8_vert_c(src, src_stride,
+                         dst, dst_stride,
+                         filter_x, x_step_q4,
+                         filter_y, y_step_q4,
+                         w, h);
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve8_avg_dspr2.c
@@ -1,0 +1,693 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_avg_vert_4_dspr2(const uint8_t *src,
+                                      int32_t src_stride,
+                                      uint8_t *dst,
+                                      int32_t dst_stride,
+                                      const int16_t *filter_y,
+                                      int32_t w,
+                                      int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2, load3, load4;
+  uint32_t      p1, p2;
+  uint32_t      n1, n2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       vector1b, vector2b, vector3b, vector4b;
+  int32_t       Temp1, Temp2;
+
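+  /* each vectorNb word packs two adjacent 16-bit taps of the 8-tap filter
+     (filter_y[0] with [1], ..., filter_y[6] with [7]); little-endian
+     halfword order is assumed, as elsewhere in these files */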
+  vector1b = ((const int32_t *)filter_y)[0];
+  vector2b = ((const int32_t *)filter_y)[1];
+  vector3b = ((const int32_t *)filter_y)[2];
+  vector4b = ((const int32_t *)filter_y)[3];
+
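+  /* start three rows above the output row: taps filter_y[0..7] are applied
+     to source rows y - 3 .. y + 4 */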
+  src -= 3 * src_stride;
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < w; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
+
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [load3] "=&r" (load3), [load4] "=&r" (load4),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+            [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_avg_vert_64_dspr2(const uint8_t *src,
+                                       int32_t src_stride,
+                                       uint8_t *dst,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_y,
+                                       int32_t h) {
+  int32_t       x, y;
+  const uint8_t *src_ptr;
+  uint8_t       *dst_ptr;
+  uint8_t       *cm = vpx_ff_cropTbl;
+  uint32_t      vector4a = 64;
+  uint32_t      load1, load2, load3, load4;
+  uint32_t      p1, p2;
+  uint32_t      n1, n2;
+  uint32_t      scratch1, scratch2;
+  uint32_t      store1, store2;
+  int32_t       vector1b, vector2b, vector3b, vector4b;
+  int32_t       Temp1, Temp2;
+
+  vector1b = ((const int32_t *)filter_y)[0];
+  vector2b = ((const int32_t *)filter_y)[1];
+  vector3b = ((const int32_t *)filter_y)[2];
+  vector4b = ((const int32_t *)filter_y)[3];
+
+  src -= 3 * src_stride;
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+    prefetch_store(dst + dst_stride + 32);
+
+    for (x = 0; x < 64; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
+
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
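+          /* finish the MACs for this group while loading dst bytes for
+             the rounding average: addqh_r.w computes (a + b + 1) >> 1 */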
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
+          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [load3] "=&r" (load3), [load4] "=&r" (load4),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+            [vector4a] "r" (vector4a),
+            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
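+/* Dispatch on the packed filter words: word 1 (taps 2 and 3) equal to
+   0x800000 means tap 3 == 128 with the rest zero, i.e. the unit "copy"
+   filter, so a plain average suffices; word 0 (taps 0 and 1) equal to
+   zero identifies libvpx's bilinear filters, which the 2-tap path
+   handles via taps 3 and 4. */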
+void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y, int y_step_q4,
+                                  int w, int h) {
+  if (((const int32_t *)filter_y)[1] == 0x800000) {
+    vpx_convolve_avg(src, src_stride,
+                     dst, dst_stride,
+                     filter_x, x_step_q4,
+                     filter_y, y_step_q4,
+                     w, h);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vpx_convolve2_avg_vert_dspr2(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_x, x_step_q4,
+                                 filter_y, y_step_q4,
+                                 w, h);
+  } else {
+    if (16 == y_step_q4) {
+      uint32_t pos = 38;
+
+      /* bit position used by extp to extract from the accumulators */
+      __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+      );
+
+      prefetch_store(dst);
+
+      switch (w) {
+        case 4:
+        case 8:
+        case 16:
+        case 32:
+          convolve_avg_vert_4_dspr2(src, src_stride,
+                                    dst, dst_stride,
+                                    filter_y, w, h);
+          break;
+        case 64:
+          prefetch_store(dst + 32);
+          convolve_avg_vert_64_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_y, h);
+          break;
+        default:
+          vpx_convolve8_avg_vert_c(src, src_stride,
+                                   dst, dst_stride,
+                                   filter_x, x_step_q4,
+                                   filter_y, y_step_q4,
+                                   w, h);
+          break;
+      }
+    } else {
+      vpx_convolve8_avg_vert_c(src, src_stride,
+                               dst, dst_stride,
+                               filter_x, x_step_q4,
+                               filter_y, y_step_q4,
+                               w, h);
+    }
+  }
+}
+
+void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                             uint8_t *dst, ptrdiff_t dst_stride,
+                             const int16_t *filter_x, int x_step_q4,
+                             const int16_t *filter_y, int y_step_q4,
+                             int w, int h) {
+  /* Fixed size intermediate buffer places limits on parameters. */
+  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
+  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
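+  /* the +7 rows cover the 8-tap vertical filter's support:
+     3 rows above the block and 4 below */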
+
+  assert(w <= 64);
+  assert(h <= 64);
+
+  if (intermediate_height < h)
+    intermediate_height = h;
+
+  if (x_step_q4 != 16 || y_step_q4 != 16)
+    return vpx_convolve8_avg_c(src, src_stride,
+                               dst, dst_stride,
+                               filter_x, x_step_q4,
+                               filter_y, y_step_q4,
+                               w, h);
+
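+  /* two passes: filter horizontally (including the 3 rows of context
+     above the block) into the 64-wide temp buffer, then filter
+     vertically from temp, averaging the result into dst */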
+  vpx_convolve8_horiz(src - (src_stride * 3), src_stride,
+                      temp, 64,
+                      filter_x, x_step_q4,
+                      filter_y, y_step_q4,
+                      w, intermediate_height);
+
+  vpx_convolve8_avg_vert(temp + 64 * 3, 64,
+                         dst, dst_stride,
+                         filter_x, x_step_q4,
+                         filter_y, y_step_q4,
+                         w, h);
+}
+
+void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                            uint8_t *dst, ptrdiff_t dst_stride,
+                            const int16_t *filter_x, int filter_x_stride,
+                            const int16_t *filter_y, int filter_y_stride,
+                            int w, int h) {
+  int x, y;
+  uint32_t tp1, tp2, tn1;
+  uint32_t tp3, tp4, tn2;
+
+  /* prefetch data to cache memory */
+  prefetch_load(src);
+  prefetch_load(src + 32);
+  prefetch_store(dst);
+
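+  /* each adduh_r.qb below averages four packed bytes at once with
+     rounding, (a + b + 1) >> 1 per byte, matching the scalar loop in
+     the default case */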
+  switch (w) {
+    case 4:
+      /* 1 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         0(%[dst])      \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+
+            : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1),
+              [tp2] "=&r" (tp2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    case 8:
+      /* 2 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         0(%[dst])      \n\t"
+            "ulw              %[tp3],         4(%[src])      \n\t"
+            "ulw              %[tp4],         4(%[dst])      \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    case 16:
+      /* 4 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         0(%[dst])      \n\t"
+            "ulw              %[tp3],         4(%[src])      \n\t"
+            "ulw              %[tp4],         4(%[dst])      \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         8(%[src])      \n\t"
+            "ulw              %[tp2],         8(%[dst])      \n\t"
+            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "ulw              %[tp3],         12(%[src])     \n\t"
+            "ulw              %[tp4],         12(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    case 32:
+      /* 8 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         0(%[dst])      \n\t"
+            "ulw              %[tp3],         4(%[src])      \n\t"
+            "ulw              %[tp4],         4(%[dst])      \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         8(%[src])      \n\t"
+            "ulw              %[tp2],         8(%[dst])      \n\t"
+            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "ulw              %[tp3],         12(%[src])     \n\t"
+            "ulw              %[tp4],         12(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         16(%[src])     \n\t"
+            "ulw              %[tp2],         16(%[dst])     \n\t"
+            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         20(%[src])     \n\t"
+            "ulw              %[tp4],         20(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         24(%[src])     \n\t"
+            "ulw              %[tp2],         24(%[dst])     \n\t"
+            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         28(%[src])     \n\t"
+            "ulw              %[tp4],         28(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    case 64:
+      prefetch_load(src + 64);
+      prefetch_store(dst + 32);
+
+      /* 16 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_load(src + src_stride + 64);
+        prefetch_store(dst + dst_stride);
+        prefetch_store(dst + dst_stride + 32);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         0(%[dst])      \n\t"
+            "ulw              %[tp3],         4(%[src])      \n\t"
+            "ulw              %[tp4],         4(%[dst])      \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         8(%[src])      \n\t"
+            "ulw              %[tp2],         8(%[dst])      \n\t"
+            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
+            "ulw              %[tp3],         12(%[src])     \n\t"
+            "ulw              %[tp4],         12(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         16(%[src])     \n\t"
+            "ulw              %[tp2],         16(%[dst])     \n\t"
+            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         20(%[src])     \n\t"
+            "ulw              %[tp4],         20(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         24(%[src])     \n\t"
+            "ulw              %[tp2],         24(%[dst])     \n\t"
+            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         28(%[src])     \n\t"
+            "ulw              %[tp4],         28(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         32(%[src])     \n\t"
+            "ulw              %[tp2],         32(%[dst])     \n\t"
+            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         36(%[src])     \n\t"
+            "ulw              %[tp4],         36(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         40(%[src])     \n\t"
+            "ulw              %[tp2],         40(%[dst])     \n\t"
+            "sw               %[tn1],         32(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         36(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         44(%[src])     \n\t"
+            "ulw              %[tp4],         44(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         48(%[src])     \n\t"
+            "ulw              %[tp2],         48(%[dst])     \n\t"
+            "sw               %[tn1],         40(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         44(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         52(%[src])     \n\t"
+            "ulw              %[tp4],         52(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "ulw              %[tp1],         56(%[src])     \n\t"
+            "ulw              %[tp2],         56(%[dst])     \n\t"
+            "sw               %[tn1],         48(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         52(%[dst])     \n\t"  /* store */
+            "ulw              %[tp3],         60(%[src])     \n\t"
+            "ulw              %[tp4],         60(%[dst])     \n\t"
+            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
+            "sw               %[tn1],         56(%[dst])     \n\t"  /* store */
+            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
+            "sw               %[tn2],         60(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+    default:
+      for (y = h; y > 0; --y) {
+        for (x = 0; x < w; ++x) {
+          dst[x] = (dst[x] + src[x] + 1) >> 1;
+        }
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c
@@ -1,0 +1,1036 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_convolve.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
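+/* Per output pixel, the horizontal filters below compute, in effect
+   (a scalar sketch; FILTER_BITS is 7, hence the preloaded rounding
+   constant 64):
+
+     int sum = 64;
+     for (k = 0; k < 8; ++k)
+       sum += src[x + k] * filter_x0[k];
+     dst[x] = ROUND_POWER_OF_TWO(dst[x] + clip_pixel(sum >> 7), 1);
+
+   the 3-sample left offset of src is applied by the caller. */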
+static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
+                                       int32_t src_stride,
+                                       uint8_t *dst,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  int32_t  vector1b, vector2b, vector3b, vector4b;
+  int32_t  Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3, p4;
+  uint32_t n1, n2, n3, n4;
+  uint32_t tn1, tn2;
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
+        "ulw              %[tn2],         8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
+        "balign           %[tn1],         %[tn2],         3              \n\t"
+        "balign           %[tn2],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
+        "preceu.ph.qbr    %[n1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[n2],          %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[n3],          %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[n4],          %[tn2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[n1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[n2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[n3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,           %[n4],          %[vector4b]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[n1],          %[tn1]                         \n\t"
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
+        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
+        "dpa.w.ph         $ac2,           %[n2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[n3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[n4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[n1],          %[vector4b]    \n\t"
+        "extp             %[Temp4],       $ac2,           31             \n\t"
+
+        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
+        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
+
+        /* clamp */
+        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
+        "lbux             %[n2],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
+        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
+
+        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
+        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
+
+        "addqh_r.w        %[p2],          %[p2],          %[n2]          \n\t"  /* average odd 2 */
+        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
+                                       int32_t src_stride,
+                                       uint8_t *dst,
+                                       int32_t dst_stride,
+                                       const int16_t *filter_x0,
+                                       int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3, p4, n1;
+  uint32_t tn1, tn2, tn3;
+  uint32_t st0, st1;
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
+        "ulw              %[tn2],         8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+        "lbu              %[Temp2],       0(%[dst])                      \n\t"
+        "lbu              %[tn3],         2(%[dst])                      \n\t"
+
+        /* even 2. pixel */
+        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[n1],          %[tn2]                         \n\t"
+        "ulw              %[tn1],         12(%[src])                     \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "preceu.ph.qbr    %[p2],          %[tn1]                         \n\t"
+        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac1,           %[p4],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac1,           %[p1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac1,           %[n1],          %[vector4b]    \n\t"
+        "extp             %[Temp1],       $ac1,           31             \n\t"
+
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+        "addqh_r.w        %[tn3],         %[tn3],         %[st1]         \n\t"
+        "sb               %[Temp2],       0(%[dst])                      \n\t"
+        "sb               %[tn3],         2(%[dst])                      \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+
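+        /* form the byte-offset (src + 1) words for the odd output pixels */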
+        "balign           %[tn3],         %[tn1],         3              \n\t"
+        "balign           %[tn1],         %[tn2],         3              \n\t"
+        "balign           %[tn2],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+
+        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
+        "lbu              %[Temp2],       4(%[dst])                      \n\t"
+        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
+
+        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a],    $ac1                           \n\t"
+        "mthi             $zero,          $ac1                           \n\t"
+        "sb               %[Temp2],       4(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tn2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tp1],         6(%[dst])                      \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tn1]                         \n\t"
+        "preceu.ph.qbl    %[n1],          %[tn1]                         \n\t"
+        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,           %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac1,           %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac1,           %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],       $ac1,           31             \n\t"
+
+        "lbu              %[tp2],         1(%[dst])                      \n\t"
+        "lbu              %[tn2],         3(%[dst])                      \n\t"
+        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
+        "preceu.ph.qbr    %[p2],          %[tn3]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,           %[n1],          %[vector4b]    \n\t"
+        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        "lbu              %[tn3],         5(%[dst])                      \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[tp2],         1(%[dst])                      \n\t"
+        "sb               %[tp1],         6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
+        "extp             %[Temp1],       $ac2,           31             \n\t"
+
+        "lbu              %[tn1],         7(%[dst])                      \n\t"
+
+        /* clamp */
+        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
+        "addqh_r.w        %[tn2],         %[tn2],         %[p4]          \n\t"
+
+        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
+        "addqh_r.w        %[tn3],         %[tn3],         %[p2]          \n\t"
+
+        "lbux             %[n1],          %[Temp1](%[cm])                \n\t"
+        "addqh_r.w        %[tn1],         %[tn1],         %[n1]          \n\t"
+
+        /* store bytes */
+        "sb               %[tn2],         3(%[dst])                      \n\t"
+        "sb               %[tn3],         5(%[dst])                      \n\t"
+        "sb               %[tn1],         7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
+                                        int32_t src_stride,
+                                        uint8_t *dst_ptr,
+                                        int32_t dst_stride,
+                                        const int16_t *filter_x0,
+                                        int32_t h,
+                                        int32_t count) {
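+  /* count is the number of 16-pixel blocks per row (1 for w == 16,
+     2 for w == 32) */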
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_store(dst_ptr + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "ulw              %[qload2],    16(%[src])                   \n\t"
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "ulw              %[qload3],    20(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels: reload the source words at a one-byte offset (src + 1) */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "ulw              %[qload2],    17(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "ulw              %[qload3],    21(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
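+/* Each pass of the inner c-loop below filters one 16-pixel chunk: the four
+ * DSP accumulators carry 8 even and 8 odd outputs, and every output is
+ * combined with the byte already in dst via addqh_r.w, a rounded halving
+ * add, i.e. (filtered + dst + 1) >> 1.  Four chunks cover the 64-wide row. */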
+static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
+                                        int32_t src_stride,
+                                        uint8_t *dst_ptr,
+                                        int32_t dst_stride,
+                                        const int16_t *filter_x0,
+                                        int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
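+  /* Each 32-bit word read above packs two adjacent 16-bit taps (pairs 0/1,
+   * 2/3, 4/5 and 6/7, assuming a little-endian host), so a single dpa.w.ph
+   * multiply-accumulates two pixels against two taps at once. */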
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+    prefetch_store(dst_ptr + dst_stride);
+    prefetch_store(dst_ptr + dst_stride + 32);
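+    /* Loads spaced 32 bytes apart cover the 64 source bytes of the next row
+     * plus the filter overhang, and the two stores cover the 64 dst bytes,
+     * assuming 32-byte cache lines on the target. */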
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
+          "ulw              %[qload2],    16(%[src])                   \n\t"
+          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
+          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
+          "ulw              %[qload3],    20(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
+          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
+          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
+          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                   \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
+          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
+          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
+          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
+          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
+          "ulw              %[qload2],    17(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
+          "ulw              %[qload3],    21(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
+          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
+
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
+
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
+
+          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
+          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
+          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                   uint8_t *dst, ptrdiff_t dst_stride,
+                                   const int16_t *filter_x, int x_step_q4,
+                                   const int16_t *filter_y, int y_step_q4,
+                                   int w, int h) {
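+  /* Two fast-path checks on the packed taps (assuming a little-endian
+   * layout): taps 2/3 equal to {0, 128} is the integer-position copy
+   * filter, so a plain average suffices; taps 0/1 both zero indicates a
+   * 2-tap (bilinear) kernel, which the convolve2 path handles. */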
+  if (((const int32_t *)filter_x)[1] == 0x800000) {
+    vpx_convolve_avg(src, src_stride,
+                     dst, dst_stride,
+                     filter_x, x_step_q4,
+                     filter_y, y_step_q4,
+                     w, h);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vpx_convolve2_avg_horiz_dspr2(src, src_stride,
+                                  dst, dst_stride,
+                                  filter_x, x_step_q4,
+                                  filter_y, y_step_q4,
+                                  w, h);
+  } else {
+    if (16 == x_step_q4) {
+      uint32_t pos = 38;
+
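+      /* Step back so the 8-tap window is centred on each output pixel; the
+       * default case below compensates with src + 3. */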
+      src -= 3;
+
+      /* bit position for extract from acc */
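+      /* POS = 38 together with "extp ..., 31" extracts accumulator bits
+       * 38..7, so each extp returns (sum >> 7) -- the FILTER_BITS scaling --
+       * with the 64 preloaded into LO acting as the rounding term. */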
+      __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+      );
+
+      /* prefetch data to cache memory */
+      prefetch_load(src);
+      prefetch_load(src + 32);
+      prefetch_store(dst);
+
+      switch (w) {
+        case 4:
+          convolve_avg_horiz_4_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_x, h);
+          break;
+        case 8:
+          convolve_avg_horiz_8_dspr2(src, src_stride,
+                                     dst, dst_stride,
+                                     filter_x, h);
+          break;
+        case 16:
+          convolve_avg_horiz_16_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h, 1);
+          break;
+        case 32:
+          convolve_avg_horiz_16_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h, 2);
+          break;
+        case 64:
+          prefetch_load(src + 64);
+          prefetch_store(dst + 32);
+
+          convolve_avg_horiz_64_dspr2(src, src_stride,
+                                      dst, dst_stride,
+                                      filter_x, h);
+          break;
+        default:
+          vpx_convolve8_avg_horiz_c(src + 3, src_stride,
+                                    dst, dst_stride,
+                                    filter_x, x_step_q4,
+                                    filter_y, y_step_q4,
+                                    w, h);
+          break;
+      }
+    } else {
+      vpx_convolve8_avg_horiz_c(src, src_stride,
+                                dst, dst_stride,
+                                filter_x, x_step_q4,
+                                filter_y, y_step_q4,
+                                w, h);
+    }
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve8_dspr2.c
@@ -1,0 +1,1282 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+uint8_t vpx_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
+uint8_t *vpx_ff_cropTbl;
+
+void vpx_dsputil_static_init(void) {
+  int i;
+
+  for (i = 0; i < 256; i++) vpx_ff_cropTbl_a[i + CROP_WIDTH] = i;
+
+  for (i = 0; i < CROP_WIDTH; i++) {
+    vpx_ff_cropTbl_a[i] = 0;
+    vpx_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
+  }
+
+  vpx_ff_cropTbl = &vpx_ff_cropTbl_a[CROP_WIDTH];
+}
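+/* The table is the 0..255 identity ramp padded with CROP_WIDTH zeros below
+ * and CROP_WIDTH 255s above, so "lbux rd, idx(cm)" clamps a filter result
+ * to the valid pixel range in a single indexed load. */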
+
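+/* The *_transposed helpers below write each horizontally filtered row out
+ * as a column (stores advance by dst_stride while dst itself advances one
+ * byte per row), so the horizontal pass can fill a transposed intermediate
+ * buffer for a later vertical pass. */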
+static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
+                                              int32_t src_stride,
+                                              uint8_t *dst,
+                                              int32_t dst_stride,
+                                              const int16_t *filter_x0,
+                                              int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *dst_ptr;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3, p4;
+  uint32_t tn1, tn2;
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    dst_ptr = dst;
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],         0(%[src])                      \n\t"
+        "ulw              %[tp2],         4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
+        "ulw              %[tn2],         8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp1],       $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
+        "balign           %[tn1],         %[tn2],         3              \n\t"
+        "balign           %[tn2],         %[tp2],         3              \n\t"
+        "balign           %[tp2],         %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],       $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac3                           \n\t"
+        "mthi             $zero,          $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],          %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[p4],          %[tn2]                         \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp2],       $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"
+        "mtlo             %[vector4a],    $ac2                           \n\t"
+        "mthi             $zero,          $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],          %[tn1]                         \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp4],       $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"
+        "lbux             %[p2],          %[Temp4](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[tn1],         0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        "sb               %[p2],          0(%[dst_ptr])                  \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
+          [dst_ptr] "+r" (dst_ptr)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += 1;
+  }
+}
+
+static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
+                                              int32_t src_stride,
+                                              uint8_t *dst,
+                                              int32_t dst_stride,
+                                              const int16_t *filter_x0,
+                                              int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint8_t *dst_ptr;
+  uint32_t vector4a = 64;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2, tp3;
+  uint32_t p1, p2, p3, p4, n1;
+  uint8_t *odd_dst;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+
+    dst_ptr = dst;
+    odd_dst = (dst_ptr + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp2],         0(%[src])                       \n\t"
+        "ulw              %[tp1],         4(%[src])                       \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp1]                          \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp1]                          \n\t"
+        "ulw              %[tp3],         8(%[src])                       \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]     \n\t"
+        "extp             %[Temp1],       $ac3,           31              \n\t"
+
+        /* even 2. pixel */
+        "preceu.ph.qbr    %[p1],          %[tp3]                          \n\t"
+        "preceu.ph.qbl    %[n1],          %[tp3]                          \n\t"
+        "ulw              %[tp2],         12(%[src])                      \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]     \n\t"
+        "extp             %[Temp3],       $ac2,           31              \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"
+        "mtlo             %[vector4a],    $ac1                            \n\t"
+        "mthi             $zero,          $ac1                            \n\t"
+        "preceu.ph.qbr    %[p2],          %[tp2]                          \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac1,           %[p4],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac1,           %[p1],          %[vector3b]     \n\t"
+        "lbux             %[tp3],         %[Temp3](%[cm])                 \n\t"
+        "dpa.w.ph         $ac1,           %[n1],          %[vector4b]     \n\t"
+        "extp             %[p3],          $ac1,           31              \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+        "sb               %[tp3],         0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+
+        "ulw              %[tp1],         1(%[src])                       \n\t"
+        "ulw              %[tp3],         5(%[src])                       \n\t"
+
+        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]     \n\t"
+        "extp             %[Temp3],       $ac2,           31              \n\t"
+
+        "lbux             %[tp2],         %[p3](%[cm])                    \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a],    $ac1                            \n\t"
+        "mthi             $zero,          $ac1                            \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"
+        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"
+        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"
+        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"
+        "sb               %[tp2],         0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+        "ulw              %[tp2],         9(%[src])                       \n\t"
+
+        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]     \n\t"
+        "extp             %[Temp2],       $ac3,           31              \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
+        "mtlo             %[vector4a],    $ac3                            \n\t"
+        "mthi             $zero,          $ac3                            \n\t"
+        "mtlo             %[vector4a],    $ac2                            \n\t"
+        "mthi             $zero,          $ac2                            \n\t"
+        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
+        "preceu.ph.qbl    %[n1],          %[tp2]                          \n\t"
+        "ulw              %[Temp1],       13(%[src])                      \n\t"
+        "dpa.w.ph         $ac1,           %[p2],          %[vector1b]     \n\t"
+        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
+        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
+        "dpa.w.ph         $ac1,           %[p3],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac1,           %[p4],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac1,           %[p1],          %[vector4b]     \n\t"
+        "extp             %[Temp3],       $ac1,           31              \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"
+        "preceu.ph.qbr    %[p2],          %[Temp1]                        \n\t"
+        "dpa.w.ph         $ac3,           %[p3],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p4],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac3,           %[p1],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac3,           %[n1],          %[vector4b]     \n\t"
+        "extp             %[Temp2],       $ac3,           31              \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[tp3],         0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]     \n\t"
+        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]     \n\t"
+        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]     \n\t"
+        "extp             %[Temp1],       $ac2,           31              \n\t"
+
+        /* clamp */
+        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"
+        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"
+        "lbux             %[n1],          %[Temp1](%[cm])                 \n\t"
+
+        /* store bytes */
+        "sb               %[p4],          0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+        "sb               %[p2],          0(%[odd_dst])                   \n\t"
+        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+        "sb               %[n1],          0(%[odd_dst])                   \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a), [cm] "r" (cm),
+          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += 1;
+  }
+}
+
+static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
+                                               int32_t src_stride,
+                                               uint8_t *dst_ptr,
+                                               int32_t dst_stride,
+                                               const int16_t *filter_x0,
+                                               int32_t h,
+                                               int32_t count) {
+  int32_t c, y;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t  filter12, filter34, filter56, filter78;
+  int32_t  Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+  uint8_t  *odd_dst;
+
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+
+    src = src_ptr;
+    dst = dst_ptr;
+
+    odd_dst = (dst + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],        0(%[src])                       \n\t"
+          "ulw              %[qload2],        4(%[src])                       \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "ulw              %[qload2],        8(%[src])                       \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "ulw              %[qload1],        12(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        16(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        20(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],        1(%[src])                       \n\t"
+          "ulw              %[qload2],        5(%[src])                       \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        9(%[src])                       \n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        17(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        21(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64), [cm] "r" (cm),
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+      );
+
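+      /* next 16-pixel strip; because the output is transposed, its
+       * block starts 16 rows (16 * dst_stride) further down */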
+      src += 16;
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
+      odd_dst = (dst + dst_stride);
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+
+    dst_ptr += 1;
+  }
+}
+
+static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
+                                               int32_t src_stride,
+                                               uint8_t *dst_ptr,
+                                               int32_t dst_stride,
+                                               const int16_t *filter_x0,
+                                               int32_t h) {
+  int32_t c, y;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t  filter12, filter34, filter56, filter78;
+  int32_t  Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+  uint32_t dst_pitch_2 = (dst_stride << 1);
+  uint8_t  *odd_dst;
+
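+  /* Each 32-bit word packs two adjacent 16-bit taps (little endian:
+   * filter12 = (filter_x0[1] << 16) | (uint16_t)filter_x0[0]), so a
+   * single dpa.w.ph accumulates two taps per instruction. */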
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+
+    src = src_ptr;
+    dst = dst_ptr;
+
+    odd_dst = (dst + dst_stride);
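+    /* transposed store: consecutive output pixels land on consecutive
+     * dst rows; dst takes the even rows and odd_dst the odd rows, each
+     * stepping by dst_pitch_2 (= 2 * dst_stride) */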
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],        0(%[src])                       \n\t"
+          "ulw              %[qload2],        4(%[src])                       \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "ulw              %[qload2],        8(%[src])                       \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "ulw              %[qload1],        12(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
+          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        16(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        20(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
+          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],        1(%[src])                       \n\t"
+          "ulw              %[qload2],        5(%[src])                       \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
+          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
+          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
+          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        9(%[src])                       \n\t"
+          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
+          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
+          "ulw              %[qload1],        13(%[src])                      \n\t"
+          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload2],        17(%[src])                      \n\t"
+          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
+          "mthi             $zero,            $ac2                            \n\t"
+          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
+          "mthi             $zero,            $ac3                            \n\t"
+          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "ulw              %[qload1],        21(%[src])                      \n\t"
+          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
+          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
+          "mthi             $zero,            $ac1                            \n\t"
+          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
+          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
+          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
+
+          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
+          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
+          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
+
+          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
+          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
+
+          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
+            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64), [cm] "r" (cm),
+            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
+      );
+
+      src += 16;
+      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
+      odd_dst = (dst + dst_stride);
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+
+    dst_ptr += 1;
+  }
+}
+
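+/* Plain-C reference for the DSPr2 kernels above and the fallback for
+ * widths without an assembly path: an 8-tap MAC per output pixel,
+ * rounded by FILTER_BITS and clamped, stored transposed (output x
+ * indexes rows). */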
+void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
+                               uint8_t *dst, ptrdiff_t dst_stride,
+                               const int16_t *filter, int w, int h) {
+  int x, y, k;
+
+  for (y = 0; y < h; ++y) {
+    for (x = 0; x < w; ++x) {
+      int sum = 0;
+
+      for (k = 0; k < 8; ++k)
+        sum += src[x + k] * filter[k];
+
+      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
+    }
+
+    src += src_stride;
+    dst += 1;
+  }
+}
+
+void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
+                           uint8_t *dst, ptrdiff_t dst_stride,
+                           int w, int h) {
+  int x, y;
+
+  for (y = 0; y < h; ++y) {
+    for (x = 0; x < w; ++x) {
+      dst[x * dst_stride] = src[x];
+    }
+
+    src += src_stride;
+    dst += 1;
+  }
+}
+
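+/* Two-pass separable filter: the first pass filters horizontally and
+ * writes the result transposed into temp; the second pass runs the same
+ * horizontal kernels over temp, which, because temp is transposed,
+ * filters vertically and transposes the data back into dst. */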
+void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                         uint8_t *dst, ptrdiff_t dst_stride,
+                         const int16_t *filter_x, int x_step_q4,
+                         const int16_t *filter_y, int y_step_q4,
+                         int w, int h) {
+  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
+  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
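+  /* temp holds the transposed intermediate, up to 64 columns by
+   * intermediate_height rows (64 * 135 covers the worst case) */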
+  uint32_t pos = 38;
+
+  /* bit position for extract from acc: pos 38 makes each 32-bit extp
+   * below return (acc >> 7), the FILTER_BITS shift */
+  __asm__ __volatile__ (
+    "wrdsp      %[pos],     1           \n\t"
+    :
+    : [pos] "r" (pos)
+  );
+
+  if (intermediate_height < h)
+    intermediate_height = h;
+
+  if (x_step_q4 != 16 || y_step_q4 != 16)
+    return vpx_convolve8_c(src, src_stride,
+                           dst, dst_stride,
+                           filter_x, x_step_q4,
+                           filter_y, y_step_q4,
+                           w, h);
+
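+  /* ((const int32_t *)filter)[1] packs taps 2 and 3 (little endian);
+   * 0x800000 means tap 3 == 128 and tap 2 == 0, i.e. the identity
+   * filter, so the whole convolution is a plain copy */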
+  if ((((const int32_t *)filter_x)[1] == 0x800000)
+      && (((const int32_t *)filter_y)[1] == 0x800000))
+    return vpx_convolve_copy(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
+
+  /* horizontal pass into the transposed intermediate: identity filter_x
+   * (tap 3 == 128) is a transposed copy, taps 0-1 == 0 marks a 2-tap
+   * (bilinear) filter handled by vpx_convolve2_dspr2 */
+  if (filter_x[3] == 0x80) {
+    copy_horiz_transposed(src - src_stride * 3, src_stride,
+                          temp, intermediate_height,
+                          w, intermediate_height);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vpx_convolve2_dspr2(src - src_stride * 3, src_stride,
+                        temp, intermediate_height,
+                        filter_x,
+                        w, intermediate_height);
+  } else {
+    src -= (src_stride * 3 + 3);
+
+    /* prefetch data to cache memory */
+    prefetch_load(src);
+    prefetch_load(src + 32);
+
+    switch (w) {
+      case 4:
+        convolve_horiz_4_transposed_dspr2(src, src_stride,
+                                          temp, intermediate_height,
+                                          filter_x, intermediate_height);
+        break;
+      case 8:
+        convolve_horiz_8_transposed_dspr2(src, src_stride,
+                                          temp, intermediate_height,
+                                          filter_x, intermediate_height);
+        break;
+      case 16:
+      case 32:
+        convolve_horiz_16_transposed_dspr2(src, src_stride,
+                                           temp, intermediate_height,
+                                           filter_x, intermediate_height,
+                                           (w / 16));
+        break;
+      case 64:
+        prefetch_load(src + 32);
+        convolve_horiz_64_transposed_dspr2(src, src_stride,
+                                           temp, intermediate_height,
+                                           filter_x, intermediate_height);
+        break;
+      default:
+        convolve_horiz_transposed(src, src_stride,
+                                  temp, intermediate_height,
+                                  filter_x, w, intermediate_height);
+        break;
+    }
+  }
+
+  /* vertical pass from the transposed intermediate back into dst, with
+   * the same identity / 2-tap special cases */
+  if (filter_y[3] == 0x80) {
+    copy_horiz_transposed(temp + 3, intermediate_height,
+                          dst, dst_stride,
+                          h, w);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vpx_convolve2_dspr2(temp + 3, intermediate_height,
+                        dst, dst_stride,
+                        filter_y,
+                        h, w);
+  } else {
+    switch (h) {
+      case 4:
+        convolve_horiz_4_transposed_dspr2(temp, intermediate_height,
+                                          dst, dst_stride,
+                                          filter_y, w);
+        break;
+      case 8:
+        convolve_horiz_8_transposed_dspr2(temp, intermediate_height,
+                                          dst, dst_stride,
+                                          filter_y, w);
+        break;
+      case 16:
+      case 32:
+        convolve_horiz_16_transposed_dspr2(temp, intermediate_height,
+                                           dst, dst_stride,
+                                           filter_y, w, (h / 16));
+        break;
+      case 64:
+        convolve_horiz_64_transposed_dspr2(temp, intermediate_height,
+                                           dst, dst_stride,
+                                           filter_y, w);
+        break;
+      default:
+        convolve_horiz_transposed(temp, intermediate_height,
+                                  dst, dst_stride,
+                                  filter_y, h, w);
+        break;
+    }
+  }
+}
+
+void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                             uint8_t *dst, ptrdiff_t dst_stride,
+                             const int16_t *filter_x, int filter_x_stride,
+                             const int16_t *filter_y, int filter_y_stride,
+                             int w, int h) {
+  int x, y;
+
+  /* prefetch data to cache memory */
+  prefetch_load(src);
+  prefetch_load(src + 32);
+  prefetch_store(dst);
+
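+  /* the loads (ulw) tolerate unaligned src, but the word stores (sw)
+   * assume dst is at least 4-byte aligned, as the codec's frame
+   * buffers are */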
+  switch (w) {
+    case 4:
+      {
+      uint32_t tp1;
+
+      /* 1 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         (%[src])      \n\t"
+            "sw               %[tp1],         (%[dst])      \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      }
+      break;
+    case 8:
+      {
+      uint32_t tp1, tp2;
+
+      /* 2 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         4(%[src])      \n\t"
+            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      }
+      break;
+    case 16:
+      {
+      uint32_t tp1, tp2, tp3, tp4;
+
+      /* 4 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         4(%[src])      \n\t"
+            "ulw              %[tp3],         8(%[src])      \n\t"
+            "ulw              %[tp4],         12(%[src])     \n\t"
+
+            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      }
+      break;
+    case 32:
+      {
+      uint32_t tp1, tp2, tp3, tp4;
+      uint32_t tp5, tp6, tp7, tp8;
+
+      /* 8 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_store(dst + dst_stride);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         4(%[src])      \n\t"
+            "ulw              %[tp3],         8(%[src])      \n\t"
+            "ulw              %[tp4],         12(%[src])     \n\t"
+            "ulw              %[tp5],         16(%[src])     \n\t"
+            "ulw              %[tp6],         20(%[src])     \n\t"
+            "ulw              %[tp7],         24(%[src])     \n\t"
+            "ulw              %[tp8],         28(%[src])     \n\t"
+
+            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
+            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
+            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
+            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
+            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
+              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      }
+      break;
+    case 64:
+      {
+      uint32_t tp1, tp2, tp3, tp4;
+      uint32_t tp5, tp6, tp7, tp8;
+
+      prefetch_load(src + 64);
+      prefetch_store(dst + 32);
+
+      /* 16 word storage */
+      for (y = h; y--; ) {
+        prefetch_load(src + src_stride);
+        prefetch_load(src + src_stride + 32);
+        prefetch_load(src + src_stride + 64);
+        prefetch_store(dst + dst_stride);
+        prefetch_store(dst + dst_stride + 32);
+
+        __asm__ __volatile__ (
+            "ulw              %[tp1],         0(%[src])      \n\t"
+            "ulw              %[tp2],         4(%[src])      \n\t"
+            "ulw              %[tp3],         8(%[src])      \n\t"
+            "ulw              %[tp4],         12(%[src])     \n\t"
+            "ulw              %[tp5],         16(%[src])     \n\t"
+            "ulw              %[tp6],         20(%[src])     \n\t"
+            "ulw              %[tp7],         24(%[src])     \n\t"
+            "ulw              %[tp8],         28(%[src])     \n\t"
+
+            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
+            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
+            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
+            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
+            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
+            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
+            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
+            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
+
+            "ulw              %[tp1],         32(%[src])     \n\t"
+            "ulw              %[tp2],         36(%[src])     \n\t"
+            "ulw              %[tp3],         40(%[src])     \n\t"
+            "ulw              %[tp4],         44(%[src])     \n\t"
+            "ulw              %[tp5],         48(%[src])     \n\t"
+            "ulw              %[tp6],         52(%[src])     \n\t"
+            "ulw              %[tp7],         56(%[src])     \n\t"
+            "ulw              %[tp8],         60(%[src])     \n\t"
+
+            "sw               %[tp1],         32(%[dst])     \n\t"  /* store */
+            "sw               %[tp2],         36(%[dst])     \n\t"  /* store */
+            "sw               %[tp3],         40(%[dst])     \n\t"  /* store */
+            "sw               %[tp4],         44(%[dst])     \n\t"  /* store */
+            "sw               %[tp5],         48(%[dst])     \n\t"  /* store */
+            "sw               %[tp6],         52(%[dst])     \n\t"  /* store */
+            "sw               %[tp7],         56(%[dst])     \n\t"  /* store */
+            "sw               %[tp8],         60(%[dst])     \n\t"  /* store */
+
+            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
+              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
+              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
+            : [src] "r" (src), [dst] "r" (dst)
+        );
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      }
+      break;
+    default:
+      for (y = h; y--; ) {
+        for (x = 0; x < w; ++x) {
+          dst[x] = src[x];
+        }
+
+        src += src_stride;
+        dst += dst_stride;
+      }
+      break;
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve8_horiz_dspr2.c
@@ -1,0 +1,921 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_horiz_4_dspr2(const uint8_t *src,
+                                   int32_t src_stride,
+                                   uint8_t *dst,
+                                   int32_t dst_stride,
+                                   const int16_t *filter_x0,
+                                   int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3, Temp4;
+  uint32_t vector4a = 64;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3, p4;
+  uint32_t n1, n2, n3, n4;
+  uint32_t tn1, tn2;
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
+        "ulw              %[tn2],      8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
+        "balign           %[tn1],      %[tn2],         3              \n\t"
+        "balign           %[tn2],      %[tp2],         3              \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* odd 1. pixel */
+        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[n4],       %[tn2]                         \n\t"
+        "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,        %[n4],          %[vector4b]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[n1],       %[tn1]                         \n\t"
+        "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,        %[n4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,        %[n1],          %[vector4b]    \n\t"
+        "extp             %[Temp4],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
+        "lbux             %[n2],       %[Temp4](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[tp1],      0(%[dst])                      \n\t"
+        "sb               %[tn1],      1(%[dst])                      \n\t"
+        "sb               %[tp2],      2(%[dst])                      \n\t"
+        "sb               %[n2],       3(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_horiz_8_dspr2(const uint8_t *src,
+                                   int32_t src_stride,
+                                   uint8_t *dst,
+                                   int32_t dst_stride,
+                                   const int16_t *filter_x0,
+                                   int32_t h) {
+  int32_t y;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t tp1, tp2;
+  uint32_t p1, p2, p3, p4, n1;
+  uint32_t tn1, tn2, tn3;
+  uint32_t st0, st1;
+
+  vector1b = ((const int32_t *)filter_x0)[0];
+  vector2b = ((const int32_t *)filter_x0)[1];
+  vector3b = ((const int32_t *)filter_x0)[2];
+  vector4b = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_load(src + src_stride);
+    prefetch_load(src + src_stride + 32);
+    prefetch_store(dst + dst_stride);
+
+    __asm__ __volatile__ (
+        "ulw              %[tp1],      0(%[src])                      \n\t"
+        "ulw              %[tp2],      4(%[src])                      \n\t"
+
+        /* even 1. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
+        "ulw              %[tn2],      8(%[src])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp1],    $ac3,           31             \n\t"
+
+        /* even 2. pixel */
+        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
+        "ulw              %[tn1],      12(%[src])                     \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        /* even 3. pixel */
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "preceu.ph.qbr    %[p2],       %[tn1]                         \n\t"
+        "dpa.w.ph         $ac1,        %[p3],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac1,        %[p4],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac1,        %[p1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac1,        %[n1],          %[vector4b]    \n\t"
+        "extp             %[Temp1],    $ac1,           31             \n\t"
+
+        /* even 4. pixel */
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "sb               %[st0],      0(%[dst])                      \n\t"
+        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
+
+        "balign           %[tn3],      %[tn1],         3              \n\t"
+        "balign           %[tn1],      %[tn2],         3              \n\t"
+        "balign           %[tn2],      %[tp2],         3              \n\t"
+        "balign           %[tp2],      %[tp1],         3              \n\t"
+
+        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
+        "extp             %[Temp3],    $ac2,           31             \n\t"
+
+        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
+
+        /* odd 1. pixel */
+        "mtlo             %[vector4a], $ac1                           \n\t"
+        "mthi             $zero,       $ac1                           \n\t"
+        "sb               %[st1],      2(%[dst])                      \n\t"
+        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
+        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
+        "preceu.ph.qbr    %[p3],       %[tn2]                         \n\t"
+        "preceu.ph.qbl    %[p4],       %[tn2]                         \n\t"
+        "sb               %[st0],      4(%[dst])                      \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 2. pixel */
+        "mtlo             %[vector4a], $ac3                           \n\t"
+        "mthi             $zero,       $ac3                           \n\t"
+        "mtlo             %[vector4a], $ac2                           \n\t"
+        "mthi             $zero,       $ac2                           \n\t"
+        "preceu.ph.qbr    %[p1],       %[tn1]                         \n\t"
+        "preceu.ph.qbl    %[n1],       %[tn1]                         \n\t"
+        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
+        "dpa.w.ph         $ac1,        %[p2],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac1,        %[p3],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac1,        %[p4],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac1,        %[p1],          %[vector4b]    \n\t"
+        "extp             %[Temp3],    $ac1,           31             \n\t"
+
+        /* odd 3. pixel */
+        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
+        "preceu.ph.qbr    %[p2],       %[tn3]                         \n\t"
+        "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p4],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac3,        %[p1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac3,        %[n1],          %[vector4b]    \n\t"
+        "extp             %[Temp2],    $ac3,           31             \n\t"
+
+        /* odd 4. pixel */
+        "sb               %[st1],      1(%[dst])                      \n\t"
+        "sb               %[st0],      6(%[dst])                      \n\t"
+        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
+        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
+        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
+        "extp             %[Temp1],    $ac2,           31             \n\t"
+
+        /* clamp */
+        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
+        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
+        "lbux             %[n1],       %[Temp1](%[cm])                \n\t"
+
+        /* store bytes */
+        "sb               %[p4],       3(%[dst])                      \n\t"
+        "sb               %[p2],       5(%[dst])                      \n\t"
+        "sb               %[n1],       7(%[dst])                      \n\t"
+
+        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
+          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
+          [st0] "=&r" (st0), [st1] "=&r" (st1),
+          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+          [n1] "=&r" (n1),
+          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+          [vector4a] "r" (vector4a),
+          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
+    );
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
+                                    int32_t src_stride,
+                                    uint8_t *dst_ptr,
+                                    int32_t dst_stride,
+                                    const int16_t *filter_x0,
+                                    int32_t h,
+                                    int32_t count) {
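+  /* count is the number of 16-pixel blocks per row: 1 for w == 16,
+     2 for w == 32 (see the dispatch in vpx_convolve8_horiz_dspr2 below). */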
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
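+  /* Each filterNM word packs two adjacent 16-bit taps, so a single dpa.w.ph
+   * accumulates two products.  A scalar sketch of the arithmetic the
+   * software pipeline below implements (illustrative only, not part of the
+   * build):
+   *
+   *   for (i = 0; i < 16; i++) {
+   *     int sum = 64;                         // 1 << (FILTER_BITS - 1)
+   *     for (k = 0; k < 8; k++)
+   *       sum += src[i + k] * filter_x0[k];
+   *     dst[i] = cm[sum >> 7];                // clamp via vpx_ff_cropTbl
+   *   }
+   */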
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_store(dst_ptr + dst_stride);
+
+    for (c = 0; c < count; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "ulw              %[qload2],    16(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "ulw              %[qload3],    20(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
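+          /* odd outputs apply the same 8 taps to the window shifted one
+             byte, so the loads restart from the unaligned base src + 1 */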
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "ulw              %[qload2],    17(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "ulw              %[qload3],    21(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst),
+            [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
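+/* Same inner loop as convolve_horiz_16_dspr2, with the per-row block count
+   fixed at 4 (64 pixels) and wider prefetch. */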
+static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
+                                    int32_t src_stride,
+                                    uint8_t *dst_ptr,
+                                    int32_t dst_stride,
+                                    const int16_t *filter_x0,
+                                    int32_t h) {
+  int32_t y, c;
+  const uint8_t *src;
+  uint8_t *dst;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector_64 = 64;
+  int32_t filter12, filter34, filter56, filter78;
+  int32_t Temp1, Temp2, Temp3;
+  uint32_t qload1, qload2, qload3;
+  uint32_t p1, p2, p3, p4, p5;
+  uint32_t st1, st2, st3;
+
+  filter12 = ((const int32_t *)filter_x0)[0];
+  filter34 = ((const int32_t *)filter_x0)[1];
+  filter56 = ((const int32_t *)filter_x0)[2];
+  filter78 = ((const int32_t *)filter_x0)[3];
+
+  for (y = h; y--;) {
+    src = src_ptr;
+    dst = dst_ptr;
+
+    /* prefetch data to cache memory */
+    prefetch_load(src_ptr + src_stride);
+    prefetch_load(src_ptr + src_stride + 32);
+    prefetch_load(src_ptr + src_stride + 64);
+    prefetch_store(dst_ptr + dst_stride);
+    prefetch_store(dst_ptr + dst_stride + 32);
+
+    for (c = 0; c < 4; c++) {
+      __asm__ __volatile__ (
+          "ulw              %[qload1],    0(%[src])                    \n\t"
+          "ulw              %[qload2],    4(%[src])                    \n\t"
+
+          /* even 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "ulw              %[qload3],    8(%[src])                    \n\t"
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
+
+          /* even 2. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "ulw              %[qload1],    12(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
+
+          /* even 3. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
+
+          /* even 4. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
+          "ulw              %[qload2],    16(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
+
+          /* even 5. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
+
+          /* even 6. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
+          "ulw              %[qload3],    20(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
+
+          /* even 7. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
+
+          /* even 8. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
+          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
+
+          /* ODD pixels */
+          "ulw              %[qload1],    1(%[src])                    \n\t"
+          "ulw              %[qload2],    5(%[src])                    \n\t"
+
+          /* odd 1. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
+          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
+          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
+          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
+          "ulw              %[qload3],    9(%[src])                    \n\t"
+          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
+
+          /* odd 2. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
+          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
+          "ulw              %[qload1],    13(%[src])                   \n\t"
+          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
+
+          /* odd 3. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
+          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
+          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
+
+          /* odd 4. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
+          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
+          "ulw              %[qload2],    17(%[src])                   \n\t"
+          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
+
+          /* odd 5. pixel */
+          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
+          "mthi             $zero,        $ac2                         \n\t"
+          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
+          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
+
+          /* odd 6. pixel */
+          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
+          "mthi             $zero,        $ac3                         \n\t"
+          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
+          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
+          "ulw              %[qload3],    21(%[src])                   \n\t"
+          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
+          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
+          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
+
+          /* odd 7. pixel */
+          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
+          "mthi             $zero,        $ac1                         \n\t"
+          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
+          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
+          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
+          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
+          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
+
+          /* odd 8. pixel */
+          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
+          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
+          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
+
+          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
+          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
+          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
+
+          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
+          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
+          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
+
+          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
+            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
+            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
+            [p5] "=&r" (p5),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
+          : [filter12] "r" (filter12), [filter34] "r" (filter34),
+            [filter56] "r" (filter56), [filter78] "r" (filter78),
+            [vector_64] "r" (vector_64),
+            [cm] "r" (cm), [dst] "r" (dst),
+            [src] "r" (src)
+      );
+
+      src += 16;
+      dst += 16;
+    }
+
+    /* Next row... */
+    src_ptr += src_stride;
+    dst_ptr += dst_stride;
+  }
+}
+
+void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                               uint8_t *dst, ptrdiff_t dst_stride,
+                               const int16_t *filter_x, int x_step_q4,
+                               const int16_t *filter_y, int y_step_q4,
+                               int w, int h) {
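+  /* Dispatch on the packed taps (little-endian layout assumed, as on the
+     DSPr2 targets this code builds for): a second word of 0x00800000 means
+     tap 3 == 128 with tap 2 == 0, which in the filter banks used here only
+     occurs for the unit filter, so the convolve reduces to a copy; a zero
+     first word (taps 0 and 1 both zero) marks the 2-tap bilinear filters. */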
+  if (((const int32_t *)filter_x)[1] == 0x800000) {
+    vpx_convolve_copy(src, src_stride,
+                      dst, dst_stride,
+                      filter_x, x_step_q4,
+                      filter_y, y_step_q4,
+                      w, h);
+  } else if (((const int32_t *)filter_x)[0] == 0) {
+    vpx_convolve2_horiz_dspr2(src, src_stride,
+                              dst, dst_stride,
+                              filter_x, x_step_q4,
+                              filter_y, y_step_q4,
+                              w, h);
+  } else {
+    if (16 == x_step_q4) {
+      uint32_t pos = 38;
+
+      prefetch_load((const uint8_t *)filter_x);
+      src -= 3;
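+      /* back up three pixels so taps 0..7 cover src[-3 .. +4] around each
+         output pixel */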
+
+      /* bit position for extract from acc: pos = 38 makes extp return
+         (acc >> 7), i.e. the FILTER_BITS shift of the rounded sum */
+      __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+      );
+
+      /* prefetch data to cache memory */
+      prefetch_load(src);
+      prefetch_load(src + 32);
+      prefetch_store(dst);
+
+      switch (w) {
+        case 4:
+          convolve_horiz_4_dspr2(src, (int32_t)src_stride,
+                                 dst, (int32_t)dst_stride,
+                                 filter_x, (int32_t)h);
+          break;
+        case 8:
+          convolve_horiz_8_dspr2(src, (int32_t)src_stride,
+                                 dst, (int32_t)dst_stride,
+                                 filter_x, (int32_t)h);
+          break;
+        case 16:
+          convolve_horiz_16_dspr2(src, (int32_t)src_stride,
+                                  dst, (int32_t)dst_stride,
+                                  filter_x, (int32_t)h, 1);
+          break;
+        case 32:
+          convolve_horiz_16_dspr2(src, (int32_t)src_stride,
+                                  dst, (int32_t)dst_stride,
+                                  filter_x, (int32_t)h, 2);
+          break;
+        case 64:
+          prefetch_load(src + 64);
+          prefetch_store(dst + 32);
+
+          convolve_horiz_64_dspr2(src, (int32_t)src_stride,
+                                  dst, (int32_t)dst_stride,
+                                  filter_x, (int32_t)h);
+          break;
+        default:
+          vpx_convolve8_horiz_c(src + 3, src_stride,
+                                dst, dst_stride,
+                                filter_x, x_step_q4,
+                                filter_y, y_step_q4,
+                                w, h);
+          break;
+      }
+    } else {
+      vpx_convolve8_horiz_c(src, src_stride,
+                            dst, dst_stride,
+                            filter_x, x_step_q4,
+                            filter_y, y_step_q4,
+                            w, h);
+    }
+  }
+}
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve8_vert_dspr2.c
@@ -1,0 +1,394 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/mips/convolve_common_dspr2.h"
+#include "vpx_dsp/vpx_dsp_common.h"
+#include "vpx_dsp/vpx_filter.h"
+#include "vpx_ports/mem.h"
+
+#if HAVE_DSPR2
+static void convolve_vert_4_dspr2(const uint8_t *src,
+                                  int32_t src_stride,
+                                  uint8_t *dst,
+                                  int32_t dst_stride,
+                                  const int16_t *filter_y,
+                                  int32_t w,
+                                  int32_t h) {
+  int32_t x, y;
+  const uint8_t *src_ptr;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2, load3, load4;
+  uint32_t p1, p2;
+  uint32_t n1, n2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2;
+
+  vector1b = ((const int32_t *)filter_y)[0];
+  vector2b = ((const int32_t *)filter_y)[1];
+  vector3b = ((const int32_t *)filter_y)[2];
+  vector4b = ((const int32_t *)filter_y)[3];
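+  /* each vectorNb packs two adjacent 16-bit taps of the 8-tap vertical
+     filter, consumed pairwise by dpa.w.ph */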
+
+  src -= 3 * src_stride;
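+  /* back up three rows so taps 0..7 cover rows y - 3 .. y + 4 for each
+     output row y */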
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+
+    for (x = 0; x < w; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
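+          /* seed the four accumulators (one per output pixel in this 4-wide
+             column) with the rounding constant 64 = 1 << (FILTER_BITS - 1) */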
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
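+          /* interleave bytes from two consecutive rows into halfword pairs
+             so each dpa.w.ph multiplies a (row r, row r + 1) pixel pair by
+             the matching (tap k, tap k + 1) coefficient pair */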
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
+
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [load3] "=&r" (load3), [load4] "=&r" (load4),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [n1] "=&r" (n1), [n2] "=&r" (n2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
+            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
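+/* Same kernel as convolve_vert_4_dspr2 with the width fixed at 64; the only
+   other change is the extra prefetch for the wider destination row. */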
+static void convolve_vert_64_dspr2(const uint8_t *src,
+                                   int32_t src_stride,
+                                   uint8_t *dst,
+                                   int32_t dst_stride,
+                                   const int16_t *filter_y,
+                                   int32_t h) {
+  int32_t x, y;
+  const uint8_t *src_ptr;
+  uint8_t *dst_ptr;
+  uint8_t *cm = vpx_ff_cropTbl;
+  uint32_t vector4a = 64;
+  uint32_t load1, load2, load3, load4;
+  uint32_t p1, p2;
+  uint32_t n1, n2;
+  uint32_t scratch1, scratch2;
+  uint32_t store1, store2;
+  int32_t vector1b, vector2b, vector3b, vector4b;
+  int32_t Temp1, Temp2;
+
+  vector1b = ((const int32_t *)filter_y)[0];
+  vector2b = ((const int32_t *)filter_y)[1];
+  vector3b = ((const int32_t *)filter_y)[2];
+  vector4b = ((const int32_t *)filter_y)[3];
+
+  src -= 3 * src_stride;
+
+  for (y = h; y--;) {
+    /* prefetch data to cache memory */
+    prefetch_store(dst + dst_stride);
+    prefetch_store(dst + dst_stride + 32);
+
+    for (x = 0; x < 64; x += 4) {
+      src_ptr = src + x;
+      dst_ptr = dst + x;
+
+      __asm__ __volatile__ (
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "mtlo             %[vector4a],  $ac0                            \n\t"
+          "mtlo             %[vector4a],  $ac1                            \n\t"
+          "mtlo             %[vector4a],  $ac2                            \n\t"
+          "mtlo             %[vector4a],  $ac3                            \n\t"
+          "mthi             $zero,        $ac0                            \n\t"
+          "mthi             $zero,        $ac1                            \n\t"
+          "mthi             $zero,        $ac2                            \n\t"
+          "mthi             $zero,        $ac3                            \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
+
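+          /* advance one row and load source rows 4..7 */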
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
+          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
+          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
+
+          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
+          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac0,           31              \n\t"
+          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac1,           31              \n\t"
+
+          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
+          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
+          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
+          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
+          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
+          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
+          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
+          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
+
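+          /* clamp pixels 0 and 1 through the crop table while the dot
+             products for pixels 2 and 3 finish */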
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
+          "extp             %[Temp1],     $ac2,           31              \n\t"
+
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
+          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
+          "extp             %[Temp2],     $ac3,           31              \n\t"
+
+          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
+
+          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
+          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
+
+          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
+          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
+
+          : [load1] "=&r" (load1), [load2] "=&r" (load2),
+            [load3] "=&r" (load3), [load4] "=&r" (load4),
+            [p1] "=&r" (p1), [p2] "=&r" (p2),
+            [n1] "=&r" (n1), [n2] "=&r" (n2),
+            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
+            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
+            [store1] "=&r" (store1), [store2] "=&r" (store2),
+            [src_ptr] "+r" (src_ptr)
+          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
+            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
+            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
+            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
+      );
+    }
+
+    /* Next row... */
+    src += src_stride;
+    dst += dst_stride;
+  }
+}
+
+void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                              uint8_t *dst, ptrdiff_t dst_stride,
+                              const int16_t *filter_x, int x_step_q4,
+                              const int16_t *filter_y, int y_step_q4,
+                              int w, int h) {
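+  /* Dispatch on the packed taps (adjacent 16-bit pairs per 32-bit word,
+     little-endian): word 1 == 0x800000 (128 << 16) is the identity
+     filter, so plain copy; word 0 == 0 means taps 0 and 1 are zero,
+     i.e. a 2-tap (bilinear) filter. */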
+  if (((const int32_t *)filter_y)[1] == 0x800000) {
+    vpx_convolve_copy(src, src_stride,
+                      dst, dst_stride,
+                      filter_x, x_step_q4,
+                      filter_y, y_step_q4,
+                      w, h);
+  } else if (((const int32_t *)filter_y)[0] == 0) {
+    vpx_convolve2_vert_dspr2(src, src_stride,
+                             dst, dst_stride,
+                             filter_x, x_step_q4,
+                             filter_y, y_step_q4,
+                             w, h);
+  } else {
+    if (16 == y_step_q4) {
+      uint32_t pos = 38;
+
+      /* bit position for extract from acc: pos = 38 so extp returns
+         bits 38..7 of the accumulator, i.e. the sum >> FILTER_BITS */
+      __asm__ __volatile__ (
+        "wrdsp      %[pos],     1           \n\t"
+        :
+        : [pos] "r" (pos)
+      );
+
+      prefetch_store(dst);
+
+      switch (w) {
+        case 4:
+        case 8:
+        case 16:
+        case 32:
+          convolve_vert_4_dspr2(src, src_stride,
+                                dst, dst_stride,
+                                filter_y, w, h);
+          break;
+        case 64:
+          prefetch_store(dst + 32);
+          convolve_vert_64_dspr2(src, src_stride,
+                                 dst, dst_stride,
+                                 filter_y, h);
+          break;
+        default:
+          vpx_convolve8_vert_c(src, src_stride,
+                               dst, dst_stride,
+                               filter_x, x_step_q4,
+                               filter_y, y_step_q4,
+                               w, h);
+          break;
+      }
+    } else {
+      vpx_convolve8_vert_c(src, src_stride,
+                           dst, dst_stride,
+                           filter_x, x_step_q4,
+                           filter_y, y_step_q4,
+                           w, h);
+    }
+  }
+}
+
+#endif
--- /dev/null
+++ b/vpx_dsp/mips/convolve_common_dspr2.h
@@ -1,0 +1,59 @@
+/*
+ *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VPX_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
+#define VPX_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
+
+#include <assert.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/mips/common_dspr2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if HAVE_DSPR2
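+/* 2-tap (bilinear) convolution entry points; the 8-tap dispatchers fall
+   back to these when only the two center taps of the filter are nonzero. */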
+void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                               uint8_t *dst, ptrdiff_t dst_stride,
+                               const int16_t *filter_x, int x_step_q4,
+                               const int16_t *filter_y, int y_step_q4,
+                               int w, int h);
+
+void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                   uint8_t *dst, ptrdiff_t dst_stride,
+                                   const int16_t *filter_x, int x_step_q4,
+                                   const int16_t *filter_y, int y_step_q4,
+                                   int w, int h);
+
+void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                                  uint8_t *dst, ptrdiff_t dst_stride,
+                                  const int16_t *filter_x, int x_step_q4,
+                                  const int16_t *filter_y, int y_step_q4,
+                                  int w, int h);
+
+void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                         uint8_t *dst, ptrdiff_t dst_stride,
+                         const int16_t *filter,
+                         int w, int h);
+
+void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
+                              uint8_t *dst, ptrdiff_t dst_stride,
+                              const int16_t *filter_x, int x_step_q4,
+                              const int16_t *filter_y, int y_step_q4,
+                              int w, int h);
+
+#endif  // #if HAVE_DSPR2
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VPX_DSP_MIPS_CONVOLVE_COMMON_DSPR2_H_
--- a/vpx_dsp/mips/vpx_common_dspr2.h
+++ /dev/null
@@ -1,61 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
-#define VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
-
-#include <assert.h>
-
-#include "./vpx_config.h"
-#include "vpx/vpx_integer.h"
-#include "vpx_dsp/mips/common_dspr2.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if HAVE_DSPR2
-extern uint8_t *vpx_ff_cropTbl;
-
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h);
-
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h);
-
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h);
-
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter,
-                         int w, int h);
-
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h);
-
-#endif  // #if HAVE_DSPR2
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // VPX_DSP_MIPS_VPX_COMMON_DSPR2_H_
--- a/vpx_dsp/mips/vpx_convolve2_avg_dspr2.c
+++ /dev/null
@@ -1,279 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src,
-                                         int32_t src_stride,
-                                         uint8_t *dst,
-                                         int32_t dst_stride,
-                                         const int16_t *filter_y,
-                                         int32_t w,
-                                         int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm),
-            [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src,
-                                          int32_t src_stride,
-                                          uint8_t *dst,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_y,
-                                          int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t filter45;;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm),
-            [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h) {
-  if (16 == y_step_q4) {
-    uint32_t pos = 38;
-
-    /* bit positon for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
-
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-      case 8:
-      case 16:
-      case 32:
-        convolve_bi_avg_vert_4_dspr2(src, src_stride,
-                                     dst, dst_stride,
-                                     filter_y, w, h);
-        break;
-      case 64:
-        prefetch_store(dst + 32);
-        convolve_bi_avg_vert_64_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_y, h);
-        break;
-      default:
-        vpx_convolve8_avg_vert_c(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_x, x_step_q4,
-                                 filter_y, y_step_q4,
-                                 w, h);
-        break;
-    }
-  } else {
-    vpx_convolve8_avg_vert_c(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
-  }
-}
-#endif
--- a/vpx_dsp/mips/vpx_convolve2_avg_horiz_dspr2.c
+++ /dev/null
@@ -1,831 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src,
-                                          int32_t src_stride,
-                                          uint8_t *dst,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  int32_t  Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3;
-  uint32_t tn1, tn2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p3],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
-        "dpa.w.ph         $ac2,           %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp4],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
-
-        /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
-        "lbux             %[p3],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
-
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
-
-        "addqh_r.w        %[p2],          %[p2],          %[p3]          \n\t"  /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src,
-                                         int32_t src_stride,
-                                         uint8_t *dst,
-                                         int32_t dst_stride,
-                                         const int16_t *filter_x0,
-                                         int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3, tp4;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t st0, st1;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "ulw              %[tp3],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-        "lbu              %[Temp2],       0(%[dst])                      \n\t"
-        "lbu              %[tp4],         2(%[dst])                      \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac1,           31             \n\t"
-
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-        "addqh_r.w        %[tp4],         %[tp4],         %[st1]         \n\t"
-        "sb               %[Temp2],       0(%[dst])                      \n\t"
-        "sb               %[tp4],         2(%[dst])                      \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-
-        "balign           %[tp3],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "lbu              %[Temp2],       4(%[dst])                      \n\t"
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "sb               %[Temp2],       4(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp3]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp3]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp1],         6(%[dst])                      \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],       $ac1,           31             \n\t"
-
-        "lbu              %[tp2],         1(%[dst])                      \n\t"
-        "lbu              %[tp3],         3(%[dst])                      \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[filter45]    \n\t"
-        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp4],         5(%[dst])                      \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp2],         1(%[dst])                      \n\t"
-        "sb               %[tp1],         6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         7(%[dst])                      \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
-        "addqh_r.w        %[tp3],         %[tp3],         %[p4]          \n\t"
-
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-        "addqh_r.w        %[tp4],         %[tp4],         %[p2]          \n\t"
-
-        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[p1]          \n\t"
-
-        /* store bytes */
-        "sb               %[tp3],         3(%[dst])                      \n\t"
-        "sb               %[tp4],         5(%[dst])                      \n\t"
-        "sb               %[tp1],         7(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                          int32_t src_stride,
-                                          uint8_t *dst_ptr,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h,
-                                          int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                          int32_t src_stride,
-                                          uint8_t *dst_ptr,
-                                          int32_t dst_stride,
-                                          const int16_t *filter_x0,
-                                          int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h) {
-  if (16 == x_step_q4) {
-    uint32_t pos = 38;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
-
-    /* prefetch data to cache memory */
-    prefetch_load(src);
-    prefetch_load(src + 32);
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-        convolve_bi_avg_horiz_4_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_x, h);
-        break;
-      case 8:
-        convolve_bi_avg_horiz_8_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_x, h);
-        break;
-      case 16:
-        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
-                                       dst, dst_stride,
-                                       filter_x, h, 1);
-        break;
-      case 32:
-        convolve_bi_avg_horiz_16_dspr2(src, src_stride,
-                                       dst, dst_stride,
-                                       filter_x, h, 2);
-        break;
-      case 64:
-        prefetch_load(src + 64);
-        prefetch_store(dst + 32);
-
-        convolve_bi_avg_horiz_64_dspr2(src, src_stride,
-                                       dst, dst_stride,
-                                       filter_x, h);
-        break;
-      default:
-        vpx_convolve8_avg_horiz_c(src, src_stride,
-                                  dst, dst_stride,
-                                  filter_x, x_step_q4,
-                                  filter_y, y_step_q4,
-                                  w, h);
-        break;
-    }
-  } else {
-    vpx_convolve8_avg_horiz_c(src, src_stride,
-                              dst, dst_stride,
-                              filter_x, x_step_q4,
-                              filter_y, y_step_q4,
-                              w, h);
-  }
-}
-#endif
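
A note on the wrdsp/extp pairing used in these kernels: wrdsp with pos = 38 programs the DSP control register so that each "extp %[TempN], $acN, 31" extracts accumulator bits 38..7, i.e. a truncating shift right by FILTER_BITS == 7. Because every accumulator is preloaded with 64 == 1 << (FILTER_BITS - 1) via mtlo, the extracted value equals the usual rounded filter sum. A hedged one-liner of the equivalence (extp_round is illustrative, not a library function):

    /* What mtlo(64) + dpa.w.ph(...) + extp(rd, ac, 31) computes when the
       DSP extract position has been set to 38. */
    static int32_t extp_round(int32_t sum) {
      return (sum + 64) >> 7;  /* == ROUND_POWER_OF_TWO(sum, FILTER_BITS) */
    }
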
--- a/vpx_dsp/mips/vpx_convolve2_dspr2.c
+++ /dev/null
@@ -1,782 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_dsp/vpx_filter.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const int16_t *filter_x0,
-                                                 int32_t h) {
-  int32_t       y;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint8_t       *dst_ptr;
-  int32_t       Temp1, Temp2;
-  uint32_t      vector4a = 64;
-  uint32_t      tp1, tp2;
-  uint32_t      p1, p2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    dst_ptr = dst;
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp2](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp2],       $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p1],          %[Temp1](%[cm])                \n\t"
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[p1],          0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[p2],          0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [dst_ptr] "+r" (dst_ptr)
-        : [filter45] "r" (filter45),[vector4a] "r" (vector4a),
-          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
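
Two details of the transposed kernels are easy to miss. First, output is written down a column: dst_ptr advances by dst_stride per output pixel while dst advances by one byte per input row, presumably so that a second transposed horizontal pass can stand in for the vertical pass of a 2-D filter. Second, "balign %[tp2], %[tp1], 3" synthesizes the odd-phase word src[1..4] from the two words already loaded, saving a second unaligned ulw. A hedged little-endian sketch of that instruction (balign3 is illustrative):

    /* balign rt, rs, 3 on little-endian MIPS: (rt << 24) keeps rt's low
       byte (src[4]) as the new top byte and (rs >> 8) supplies src[1..3],
       yielding the word at src + 1 given rs = src[0..3], rt = src[4..7]. */
    static uint32_t balign3(uint32_t rt, uint32_t rs) {
      return (rt << 24) | (rs >> 8);
    }
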
-
-static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src,
-                                                 int32_t src_stride,
-                                                 uint8_t *dst,
-                                                 int32_t dst_stride,
-                                                 const int16_t *filter_x0,
-                                                 int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint8_t *dst_ptr;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3;
-  uint32_t p1, p2, p3, p4;
-  uint8_t *odd_dst;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    dst_ptr = dst;
-    odd_dst = (dst_ptr + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                       \n\t"
-        "ulw              %[tp2],         4(%[src])                       \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                          \n\t"
-        "ulw              %[tp3],         8(%[src])                       \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
-        "extp             %[Temp1],       $ac3,           31              \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,           %[p2],          %[filter45]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "balign           %[tp3],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[filter45]     \n\t"
-        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
-        "extp             %[p3],          $ac1,           31              \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        "lbux             %[Temp1],         %[p3](%[cm])                    \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"
-        "sb               %[Temp1],       0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-
-        "dpa.w.ph         $ac3,           %[p1],          %[filter45]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[filter45]     \n\t"
-        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "extp             %[Temp3],       $ac1,           31              \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[filter45]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp3],         0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[filter45]     \n\t"
-        "extp             %[Temp1],       $ac2,           31              \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"
-        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"
-        "lbux             %[p1],          %[Temp1](%[cm])                 \n\t"
-
-        /* store bytes */
-        "sb               %[p4],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[p2],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[p1],          0(%[odd_dst])                   \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
-        : [filter45] "r" (filter45),[vector4a] "r" (vector4a), [cm] "r" (cm),
-          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
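
The 8-wide kernel above splits its stores between dst_ptr (even outputs) and odd_dst (odd outputs), both stepping dst_pitch_2 == 2 * dst_stride, so the eight results of one input row land in eight consecutive transposed rows. A hedged scalar sketch of that addressing (store_split is illustrative, not a library function):

    static void store_split(uint8_t *dst, int32_t dst_stride,
                            const uint8_t out[8]) {
      uint8_t *even = dst;              /* outputs 0, 2, 4, 6 */
      uint8_t *odd = dst + dst_stride;  /* outputs 1, 3, 5, 7 */
      int i;
      for (i = 0; i < 4; ++i) {
        even[2 * i * dst_stride] = out[2 * i];
        odd[2 * i * dst_stride] = out[2 * i + 1];
      }
    }
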
-
-static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst_ptr,
-                                                  int32_t dst_stride,
-                                                  const int16_t *filter_x0,
-                                                  int32_t h,
-                                                  int32_t count) {
-  int32_t       c, y;
-  const uint8_t *src;
-  uint8_t       *dst;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector_64 = 64;
-  int32_t       Temp1, Temp2, Temp3;
-  uint32_t      qload1, qload2;
-  uint32_t      p1, p2, p3, p4, p5;
-  uint32_t      st1, st2, st3;
-  uint32_t      dst_pitch_2 = (dst_stride << 1);
-  uint8_t       *odd_dst;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
-
-      src += 16;
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += 1;
-  }
-}
-
-static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
-                                                  int32_t src_stride,
-                                                  uint8_t *dst_ptr,
-                                                  int32_t dst_stride,
-                                                  const int16_t *filter_x0,
-                                                  int32_t h) {
-  int32_t       c, y;
-  const uint8_t *src;
-  uint8_t       *dst;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector_64 = 64;
-  int32_t       Temp1, Temp2, Temp3;
-  uint32_t      qload1, qload2;
-  uint32_t      p1, p2, p3, p4, p5;
-  uint32_t      st1, st2, st3;
-  uint32_t      dst_pitch_2 = (dst_stride << 1);
-  uint8_t       *odd_dst;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter45]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter45]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter45]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter45]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter45]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter45]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter45]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter45]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter45]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter45]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter45]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter45]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter45]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
-
-      src += 16;
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += 1;
-  }
-}
-
-void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter, int w, int h) {
-  int x, y;
-
-  for (y = 0; y < h; ++y) {
-    for (x = 0; x < w; ++x) {
-      int sum = 0;
-
-      sum += src[x] * filter[3];
-      sum += src[x + 1] * filter[4];
-
-      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
-    }
-
-    src += src_stride;
-    dst += 1;
-  }
-}
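
The plain-C fallback above makes the kernel's access pattern explicit: only taps 3 and 4 of the 8-tap array are used (a bilinear, 2-tap filter), and the output is written transposed, with dst stepping by dst_stride per output pixel and by one byte per input row. A hedged sketch of the equivalent "convolve, then transpose" formulation, using the same vpx_dsp helpers this file already includes (convolve_then_transpose_ref is a hypothetical name, for illustration only):

static void convolve_then_transpose_ref(const uint8_t *src, ptrdiff_t src_stride,
                                        uint8_t *dst, ptrdiff_t dst_stride,
                                        const int16_t *filter, int w, int h) {
  int x, y;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      const int sum = src[y * src_stride + x] * filter[3] +
                      src[y * src_stride + x + 1] * filter[4];
      /* row y of the convolution becomes column y of the output */
      dst[x * dst_stride + y] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
    }
  }
}
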
-
-void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter,
-                         int w, int h) {
-  uint32_t pos = 38;
-
-  /* bit position for extract from acc */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-
-  switch (w) {
-    case 4:
-      convolve_bi_horiz_4_transposed_dspr2(src, src_stride,
-                                           dst, dst_stride,
-                                           filter, h);
-      break;
-    case 8:
-      convolve_bi_horiz_8_transposed_dspr2(src, src_stride,
-                                           dst, dst_stride,
-                                           filter, h);
-      break;
-    case 16:
-    case 32:
-      convolve_bi_horiz_16_transposed_dspr2(src, src_stride,
-                                            dst, dst_stride,
-                                            filter, h,
-                                            (w / 16));
-      break;
-    case 64:
-      prefetch_load(src + 32);
-      convolve_bi_horiz_64_transposed_dspr2(src, src_stride,
-                                            dst, dst_stride,
-                                            filter, h);
-      break;
-    default:
-      convolve_bi_horiz_transposed(src, src_stride,
-                                   dst, dst_stride,
-                                   filter, w, h);
-      break;
-  }
-}
-#endif
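
All of the DSPr2 kernels in these deleted files share one fixed-point recipe: each accumulator is seeded with 64 via mtlo, which is the rounding term 1 << (FILTER_BITS - 1) for vpx_dsp's FILTER_BITS == 7; dpa.w.ph accumulates the two tap products; and extp ..., 31 with the DSP control pos field set to 38 by the wrdsp above extracts the 32 bits ending at bit 38, i.e. the accumulator shifted right by 7 (38 == 31 + 7). A hedged scalar model of one output pixel, under that reading of the DSP ASE semantics (bi_pixel_ref is an illustrative name):

static uint8_t bi_pixel_ref(const uint8_t *src, const int16_t *filter) {
  int32_t acc = 64;                       /* mtlo: 1 << (FILTER_BITS - 1) */
  acc += src[0] * filter[3];              /* dpa.w.ph, low halfwords */
  acc += src[1] * filter[4];              /* dpa.w.ph, high halfwords */
  return clip_pixel(acc >> FILTER_BITS);  /* extp ..., 31 with pos == 38 */
}
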
--- a/vpx_dsp/mips/vpx_convolve2_horiz_dspr2.c
+++ /dev/null
@@ -1,711 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_horiz_4_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_x0,
-                                      int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp4],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p1],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp4](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],      0(%[dst])                      \n\t"
-        "sb               %[p1],       1(%[dst])                      \n\t"
-        "sb               %[tp2],      2(%[dst])                      \n\t"
-        "sb               %[p2],       3(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
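
The filter45 = ((const int32_t *)filter)[0] idiom above (repeated in every kernel that follows) fetches both 16-bit taps with a single word load, relying on filter[3] and filter[4] being adjacent in the coefficient array; on little-endian MIPS the low halfword of the loaded word is filter[3] and the high halfword is filter[4], matching the lane order dpa.w.ph pairs against the pixels. A hedged sketch of the equivalent explicit packing (pack_filter45 is a hypothetical helper):

static uint32_t pack_filter45(const int16_t *filter_x0) {
  /* same value as ((const int32_t *)&filter_x0[3])[0] on little-endian */
  return (uint16_t)filter_x0[3] | ((uint32_t)(uint16_t)filter_x0[4] << 16);
}
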
-
-static void convolve_bi_horiz_8_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_x0,
-                                      int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3;
-  uint32_t p1, p2, p3, p4;
-  uint32_t st0, st1;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "ulw              %[tp3],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "dpa.w.ph         $ac2,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac1,           31             \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "sb               %[st0],      0(%[dst])                      \n\t"
-        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
-
-        "balign           %[tp3],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-
-        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "sb               %[st1],      2(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp3]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp3]                         \n\t"
-        "sb               %[st0],      4(%[dst])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,        %[p2],          %[filter45]    \n\t"
-        "extp             %[Temp3],    $ac1,           31             \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[filter45]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[st1],      1(%[dst])                      \n\t"
-        "sb               %[st0],      6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[filter45]    \n\t"
-        "extp             %[Temp1],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[p1],       %[Temp1](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[p4],       3(%[dst])                      \n\t"
-        "sb               %[p2],       5(%[dst])                      \n\t"
-        "sb               %[p1],       7(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
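
The odd output pixels need the source stream advanced by one byte, and the 8-wide kernel synthesizes it with balign instead of issuing extra unaligned loads (the 16- and 64-wide kernels below load 1(%[src]) directly instead). A hedged little-endian model of "balign %[tp2], %[tp1], 3" (balign3 is an illustrative name):

/* With lo = bytes src[0..3] and hi = bytes src[4..7] (LSB first), the splice
   yields the word at src + 1, i.e. bytes src[1..4], with no second load. */
static uint32_t balign3(uint32_t hi, uint32_t lo) {
  return (hi << 24) | (lo >> 8);
}
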
-
-static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride,
-                                       uint8_t *dst_ptr,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h,
-                                       int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 2 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 2 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 2 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
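
Two things make the 16-wide kernel dense. First, the tags on the mtlo/dpa/extp/lbux/sb lines trace a three-accumulator software pipeline: while one accumulator finishes pixel k, the next is already being seeded for pixel k + 2, and the clamped pixel k - 1 is stored. Second, "lbux %[st], %[Temp](%[cm])" clamps with a single load: cm points into vpx_ff_cropTbl, which libvpx builds with guard bands so that out-of-range sums index saturated bytes. A hedged reconstruction of that table (sizes assumed from libvpx's common DSPr2 setup, names illustrative):

#define CROP_WIDTH 512  /* assumed guard-band width */
static uint8_t crop_a[CROP_WIDTH + 256 + CROP_WIDTH];
static uint8_t *crop_init(void) {
  int i;
  for (i = 0; i < CROP_WIDTH; i++) crop_a[i] = 0;                      /* < 0 */
  for (i = 0; i < 256; i++) crop_a[CROP_WIDTH + i] = (uint8_t)i;       /* pass */
  for (i = 0; i < CROP_WIDTH; i++) crop_a[CROP_WIDTH + 256 + i] = 255; /* > 255 */
  return &crop_a[CROP_WIDTH];  /* cm[Temp] then behaves like clip_pixel(Temp) */
}
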
-
-static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr,
-                                       int32_t src_stride,
-                                       uint8_t *dst_ptr,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  const int16_t *filter = &filter_x0[3];
-  uint32_t filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter45]  \n\t" /* even 2 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 2 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter45]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 2 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter45]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter45]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter45]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter45]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter45]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter45]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter45]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter45]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter45] "r" (filter45), [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h) {
-  if (16 == x_step_q4) {
-    uint32_t pos = 38;
-
-    prefetch_load((const uint8_t *)filter_x);
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
-
-    /* prefetch data to cache memory */
-    prefetch_load(src);
-    prefetch_load(src + 32);
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-        convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filter_x, (int32_t)h);
-        break;
-      case 8:
-        convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filter_x, (int32_t)h);
-        break;
-      case 16:
-        convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
-                                   dst, (int32_t)dst_stride,
-                                   filter_x, (int32_t)h, 1);
-        break;
-      case 32:
-        convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride,
-                                   dst, (int32_t)dst_stride,
-                                   filter_x, (int32_t)h, 2);
-        break;
-      case 64:
-        prefetch_load(src + 64);
-        prefetch_store(dst + 32);
-
-        convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride,
-                                   dst, (int32_t)dst_stride,
-                                   filter_x, (int32_t)h);
-        break;
-      default:
-        vpx_convolve8_horiz_c(src, src_stride,
-                              dst, dst_stride,
-                              filter_x, x_step_q4,
-                              filter_y, y_step_q4,
-                              w, h);
-        break;
-    }
-  } else {
-    vpx_convolve8_horiz_c(src, src_stride,
-                          dst, dst_stride,
-                          filter_x, x_step_q4,
-                          filter_y, y_step_q4,
-                          w, h);
-  }
-}
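
x_step_q4 is the per-output-pixel source step in Q4 fixed point, so 16 means exactly 1.0 pixel per output: no horizontal scaling, which is the only case these 2-tap kernels handle. Any other step, and any width without a dedicated kernel, falls back to vpx_convolve8_horiz_c. A hedged sketch of the Q4 convention the fallback implements (src_index_q4 is an illustrative helper):

/* Source index for output pixel x; in the general path the low 4 bits of
   the Q4 position would select the subpel filter phase. */
static int src_index_q4(int x0_q4, int x, int x_step_q4) {
  return (x0_q4 + x * x_step_q4) >> 4;
}
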
-#endif
--- a/vpx_dsp/mips/vpx_convolve2_vert_dspr2.c
+++ /dev/null
@@ -1,264 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_bi_vert_4_dspr2(const uint8_t *src,
-                                     int32_t src_stride,
-                                     uint8_t *dst,
-                                     int32_t dst_stride,
-                                     const int16_t *filter_y,
-                                     int32_t w,
-                                     int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
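
The vertical kernel loads the same four columns from two adjacent rows, widens them with preceu.ph.qb[rl], then pairs each column's two taps into one register: precrq.ph.w builds the pair for pixel 2 before append overwrites %[p1] with the pair for pixel 1, so each dpa.w.ph produces one complete output pixel. A hedged scalar model of one such pixel (bi_vert_pixel_ref is an illustrative name):

static uint8_t bi_vert_pixel_ref(const uint8_t *src, int32_t src_stride,
                                 const int16_t *filter_y) {
  int32_t acc = 64;                      /* rounding, 1 << (FILTER_BITS - 1) */
  acc += src[0] * filter_y[3];           /* current row (low halfword) */
  acc += src[src_stride] * filter_y[4];  /* next row (high halfword) */
  return clip_pixel(acc >> FILTER_BITS);
}
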
-
-static void convolve_bi_vert_64_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_y,
-                                      int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2;
-  uint32_t      p1, p2;
-  uint32_t      scratch1;
-  uint32_t      store1, store2;
-  int32_t       Temp1, Temp2;
-  const int16_t *filter = &filter_y[3];
-  uint32_t      filter45;
-
-  filter45 = ((const int32_t *)filter)[0];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter45]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-
-          "precrq.ph.w      %[p2],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[filter45]     \n\t"
-          "dpa.w.ph         $ac3,         %[p2],          %[filter45]     \n\t"
-
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [scratch1] "=&r" (scratch1),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [filter45] "r" (filter45), [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h) {
-  if (16 == y_step_q4) {
-    uint32_t pos = 38;
-
-    /* bit position for extract from acc */
-    __asm__ __volatile__ (
-      "wrdsp      %[pos],     1           \n\t"
-      :
-      : [pos] "r" (pos)
-    );
-
-    prefetch_store(dst);
-
-    switch (w) {
-      case 4:
-      case 8:
-      case 16:
-      case 32:
-        convolve_bi_vert_4_dspr2(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_y, w, h);
-        break;
-      case 64:
-        prefetch_store(dst + 32);
-        convolve_bi_vert_64_dspr2(src, src_stride,
-                                  dst, dst_stride,
-                                  filter_y, h);
-        break;
-      default:
-        vpx_convolve8_vert_c(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
-        break;
-    }
-  } else {
-    vpx_convolve8_vert_c(src, src_stride,
-                         dst, dst_stride,
-                         filter_x, x_step_q4,
-                         filter_y, y_step_q4,
-                         w, h);
-  }
-}
-#endif
--- a/vpx_dsp/mips/vpx_convolve8_avg_dspr2.c
+++ /dev/null
@@ -1,693 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_avg_vert_4_dspr2(const uint8_t *src,
-                                      int32_t src_stride,
-                                      uint8_t *dst,
-                                      int32_t dst_stride,
-                                      const int16_t *filter_y,
-                                      int32_t w,
-                                      int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2, load3, load4;
-  uint32_t      p1, p2;
-  uint32_t      n1, n2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       vector1b, vector2b, vector3b, vector4b;
-  int32_t       Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
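  /* Editorial sketch (hedged, not part of the patch): the four vector*b words
   * above pack the 8-tap filter as adjacent halfword pairs (taps 0-1, 2-3,
   * 4-5, 6-7), one pair per dpa.w.ph.  Backing src up by 3 rows centers the
   * window, so output row y reads source rows y - 3 .. y + 4, roughly:
   *   acc = 64;
   *   for (k = 0; k < 8; k++)
   *     acc += src[(y - 3 + k) * src_stride + x] * filter_y[k];
   *   res = clip_pixel(acc >> FILTER_BITS);
   *   dst[x] = ROUND_POWER_OF_TWO(dst[x] + res, 1);  -> the "avg" store,
   * with the averaging handled later in this kernel.
   */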
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
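
Editor's note: the assembly above pipelines four output pixels across the four DSP accumulators, but per pixel it computes straightforward arithmetic: an 8-tap vertical filter with a 2^6 rounding term (vector4a), a 7-bit shift (what the wrdsp/extp pair implements), a clamp through vpx_ff_cropTbl, and a rounding average with the existing dst byte (addqh_r.w). A scalar sketch of one pixel under those conventions; clip255 stands in for the table lookup:

    static unsigned char clip255(int v) {  /* stands in for the cropTbl lookup */
      return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* src points at the top tap (the caller already did src -= 3 * stride) */
    static unsigned char avg_vert_pixel(const unsigned char *src, int stride,
                                        unsigned char dst_byte,
                                        const short *filter_y /* 8 taps */) {
      int k, sum = 64;  /* vector4a: rounding term for the >> 7 below */
      for (k = 0; k < 8; ++k)
        sum += src[k * stride] * filter_y[k];
      /* addqh_r.w: rounding average of the clamped result with dst */
      return (unsigned char)((dst_byte + clip255(sum >> 7) + 1) >> 1);
    }
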
-static void convolve_avg_vert_64_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_y,
-                                       int32_t h) {
-  int32_t       x, y;
-  const uint8_t *src_ptr;
-  uint8_t       *dst_ptr;
-  uint8_t       *cm = vpx_ff_cropTbl;
-  uint32_t      vector4a = 64;
-  uint32_t      load1, load2, load3, load4;
-  uint32_t      p1, p2;
-  uint32_t      n1, n2;
-  uint32_t      scratch1, scratch2;
-  uint32_t      store1, store2;
-  int32_t       vector1b, vector2b, vector3b, vector4b;
-  int32_t       Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch1],  0(%[dst_ptr])                   \n\t"
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-          "lbu              %[scratch2],  1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 1 */
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 2 */
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-          "lbu              %[scratch1],  2(%[dst_ptr])                   \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-          "lbu              %[scratch2],  3(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "addqh_r.w        %[store1],    %[store1],      %[scratch1]     \n\t" /* pixel 3 */
-          "addqh_r.w        %[store2],    %[store2],      %[scratch2]     \n\t" /* pixel 4 */
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a),
-            [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
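
Editor's note: the only differences from the width-parameterized version above are the fixed 64-pixel row and the second prefetch_store 32 bytes further along, so both cache lines of the destination row are touched one row ahead of the stores. For reference, the prefetch helpers are thin wrappers over the MIPS pref instruction; roughly as below (the exact hints and form are an assumption here, the real definitions live in the dspr2 common header):

    static inline void prefetch_load(const unsigned char *addr) {
      /* hint 0: data will be loaded */
      __asm__ __volatile__("pref 0, 0(%[a]) \n\t" : : [a] "r"(addr));
    }

    static inline void prefetch_store(unsigned char *addr) {
      /* hint 1: data will be stored */
      __asm__ __volatile__("pref 1, 0(%[a]) \n\t" : : [a] "r"(addr));
    }
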
-void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                  uint8_t *dst, ptrdiff_t dst_stride,
-                                  const int16_t *filter_x, int x_step_q4,
-                                  const int16_t *filter_y, int y_step_q4,
-                                  int w, int h) {
-  if (((const int32_t *)filter_y)[1] == 0x800000) {
-    vpx_convolve_avg(src, src_stride,
-                     dst, dst_stride,
-                     filter_x, x_step_q4,
-                     filter_y, y_step_q4,
-                     w, h);
-  } else if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_avg_vert_dspr2(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_x, x_step_q4,
-                                 filter_y, y_step_q4,
-                                 w, h);
-  } else {
-    if (16 == y_step_q4) {
-      uint32_t pos = 38;
-
-      /* bit position for extract from acc */
-      __asm__ __volatile__ (
-        "wrdsp      %[pos],     1           \n\t"
-        :
-        : [pos] "r" (pos)
-      );
-
-      prefetch_store(dst);
-
-      switch (w) {
-        case 4:
-        case 8:
-        case 16:
-        case 32:
-          convolve_avg_vert_4_dspr2(src, src_stride,
-                                    dst, dst_stride,
-                                    filter_y, w, h);
-          break;
-        case 64:
-          prefetch_store(dst + 32);
-          convolve_avg_vert_64_dspr2(src, src_stride,
-                                     dst, dst_stride,
-                                     filter_y, h);
-          break;
-        default:
-          vpx_convolve8_avg_vert_c(src, src_stride,
-                                   dst, dst_stride,
-                                   filter_x, x_step_q4,
-                                   filter_y, y_step_q4,
-                                   w, h);
-          break;
-      }
-    } else {
-      vpx_convolve8_avg_vert_c(src, src_stride,
-                               dst, dst_stride,
-                               filter_x, x_step_q4,
-                               filter_y, y_step_q4,
-                               w, h);
-    }
-  }
-}
-
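
Editor's note: the two word-compares at the top of this dispatcher read pairs of 16-bit taps as one 32-bit load (little-endian halfword packing is assumed throughout these files). Word 1 equal to 0x00800000 means tap 2 is 0 and tap 3 is 128, i.e. the unit filter, so the convolution degenerates to a copy and only the averaging remains; word 0 equal to 0 marks the bilinear bank, whose only nonzero taps are 3 and 4, so the 2-tap path applies. A small illustration with representative (made-up) tap sets, using the same type punning as the dispatcher:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      /* unit filter: output == input, so vpx_convolve_avg() is enough */
      const int16_t unit[8]  = { 0, 0, 0, 128, 0, 0, 0, 0 };
      /* bilinear-style filter: only taps 3 and 4 are nonzero */
      const int16_t bilin[8] = { 0, 0, 0, 112, 16, 0, 0, 0 };

      assert(((const int32_t *)unit)[1] == 0x800000);  /* copy path */
      assert(((const int32_t *)bilin)[0] == 0);        /* 2-tap path */
      return 0;
    }
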
-void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                             uint8_t *dst, ptrdiff_t dst_stride,
-                             const int16_t *filter_x, int x_step_q4,
-                             const int16_t *filter_y, int y_step_q4,
-                             int w, int h) {
-  /* Fixed size intermediate buffer places limits on parameters. */
-  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
-  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
-
-  assert(w <= 64);
-  assert(h <= 64);
-
-  if (intermediate_height < h)
-    intermediate_height = h;
-
-  if (x_step_q4 != 16 || y_step_q4 != 16)
-    return vpx_convolve8_avg_c(src, src_stride,
-                               dst, dst_stride,
-                               filter_x, x_step_q4,
-                               filter_y, y_step_q4,
-                               w, h);
-
-  vpx_convolve8_horiz(src - (src_stride * 3), src_stride,
-                      temp, 64,
-                      filter_x, x_step_q4,
-                      filter_y, y_step_q4,
-                      w, intermediate_height);
-
-  vpx_convolve8_avg_vert(temp + 64 * 3, 64,
-                         dst, dst_stride,
-                         filter_x, x_step_q4,
-                         filter_y, y_step_q4,
-                         w, h);
-}
-
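
Editor's note: the 2-D path above is the usual two-pass scheme: filter horizontally into a 64-byte-stride scratch buffer with three extra rows above and four below each output row (hence the +7 for an 8-tap filter), then run the vertical averaging pass from temp + 64 * 3. A quick check that the fixed 64 * 135 buffer covers the limits asserted above:

    #include <assert.h>

    int main(void) {
      const int w = 64, h = 64;  /* the maxima asserted in the function */
      const int y_step_q4 = 16;  /* the only step this path accepts */
      int intermediate_height = ((h * y_step_q4) >> 4) + 7;  /* 71 rows */
      assert(w * intermediate_height <= 64 * 135);
      return 0;
    }
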
-void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                            uint8_t *dst, ptrdiff_t dst_stride,
-                            const int16_t *filter_x, int filter_x_stride,
-                            const int16_t *filter_y, int filter_y_stride,
-                            int w, int h) {
-  int x, y;
-  uint32_t tp1, tp2, tn1;
-  uint32_t tp3, tp4, tn2;
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-      /* 1 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-
-            : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1),
-              [tp2] "=&r" (tp2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 8:
-      /* 2 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 16:
-      /* 4 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 32:
-      /* 8 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         16(%[src])     \n\t"
-            "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         20(%[src])     \n\t"
-            "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         24(%[src])     \n\t"
-            "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         28(%[src])     \n\t"
-            "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    case 64:
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      /* 16 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_load(src + src_stride + 64);
-        prefetch_store(dst + dst_stride);
-        prefetch_store(dst + dst_stride + 32);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         0(%[dst])      \n\t"
-            "ulw              %[tp3],         4(%[src])      \n\t"
-            "ulw              %[tp4],         4(%[dst])      \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         8(%[src])      \n\t"
-            "ulw              %[tp2],         8(%[dst])      \n\t"
-            "sw               %[tn1],         0(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         4(%[dst])      \n\t"  /* store */
-            "ulw              %[tp3],         12(%[src])     \n\t"
-            "ulw              %[tp4],         12(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         16(%[src])     \n\t"
-            "ulw              %[tp2],         16(%[dst])     \n\t"
-            "sw               %[tn1],         8(%[dst])      \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         12(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         20(%[src])     \n\t"
-            "ulw              %[tp4],         20(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         24(%[src])     \n\t"
-            "ulw              %[tp2],         24(%[dst])     \n\t"
-            "sw               %[tn1],         16(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         20(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         28(%[src])     \n\t"
-            "ulw              %[tp4],         28(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         32(%[src])     \n\t"
-            "ulw              %[tp2],         32(%[dst])     \n\t"
-            "sw               %[tn1],         24(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         28(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         36(%[src])     \n\t"
-            "ulw              %[tp4],         36(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         40(%[src])     \n\t"
-            "ulw              %[tp2],         40(%[dst])     \n\t"
-            "sw               %[tn1],         32(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         36(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         44(%[src])     \n\t"
-            "ulw              %[tp4],         44(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         48(%[src])     \n\t"
-            "ulw              %[tp2],         48(%[dst])     \n\t"
-            "sw               %[tn1],         40(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         44(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         52(%[src])     \n\t"
-            "ulw              %[tp4],         52(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "ulw              %[tp1],         56(%[src])     \n\t"
-            "ulw              %[tp2],         56(%[dst])     \n\t"
-            "sw               %[tn1],         48(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         52(%[dst])     \n\t"  /* store */
-            "ulw              %[tp3],         60(%[src])     \n\t"
-            "ulw              %[tp4],         60(%[dst])     \n\t"
-            "adduh_r.qb       %[tn1], %[tp2], %[tp1]         \n\t"  /* average */
-            "sw               %[tn1],         56(%[dst])     \n\t"  /* store */
-            "adduh_r.qb       %[tn2], %[tp3], %[tp4]         \n\t"  /* average */
-            "sw               %[tn2],         60(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tn1] "=&r" (tn1), [tn2] "=&r" (tn2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-    default:
-      for (y = h; y > 0; --y) {
-        for (x = 0; x < w; ++x) {
-          dst[x] = (dst[x] + src[x] + 1) >> 1;
-        }
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-  }
-}
-#endif
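
Editor's note: all of the word-sized cases above lean on a single DSPr2 instruction, adduh_r.qb, which averages four packed unsigned bytes at once with rounding; per byte it is exactly the (dst[x] + src[x] + 1) >> 1 of the scalar fallback in the default case. A portable sketch:

    #include <stdint.h>

    /* per-byte rounding average of two words of four packed uint8 lanes */
    static uint32_t adduh_r_qb(uint32_t a, uint32_t b) {
      uint32_t r = 0;
      int i;
      for (i = 0; i < 32; i += 8) {
        uint32_t la = (a >> i) & 0xff;
        uint32_t lb = (b >> i) & 0xff;
        r |= ((la + lb + 1) >> 1) << i;
      }
      return r;
    }
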
--- a/vpx_dsp/mips/vpx_convolve8_avg_horiz_dspr2.c
+++ /dev/null
@@ -1,1036 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_convolve.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_avg_horiz_4_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  int32_t  vector1b, vector2b, vector3b, vector4b;
-  int32_t  Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4;
-  uint32_t n1, n2, n3, n4;
-  uint32_t tn1, tn2;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "ulw              %[tn2],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
-        "balign           %[tn1],         %[tn2],         3              \n\t"
-        "balign           %[tn2],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        "lbu              %[p2],          3(%[dst])                      \n\t"  /* load odd 2 */
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"  /* even 1 */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "lbu              %[Temp1],       1(%[dst])                      \n\t"  /* load odd 1 */
-        "preceu.ph.qbr    %[n1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[n2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[n3],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n4],          %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[n1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn2],         0(%[dst])                      \n\t"  /* load even 1 */
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"  /* even 2 */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[n1],          %[tn1]                         \n\t"
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"  /* odd 1 */
-        "addqh_r.w        %[tn2],         %[tn2],         %[tp1]         \n\t"  /* average even 1 */
-        "dpa.w.ph         $ac2,           %[n2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp4],       $ac2,           31             \n\t"
-
-        "lbu              %[tp1],         2(%[dst])                      \n\t"  /* load even 2 */
-        "sb               %[tn2],         0(%[dst])                      \n\t"  /* store even 1 */
-
-        /* clamp */
-        "addqh_r.w        %[Temp1],       %[Temp1],       %[tn1]         \n\t"  /* average odd 1 */
-        "lbux             %[n2],          %[Temp4](%[cm])                \n\t"  /* odd 2 */
-        "sb               %[Temp1],       1(%[dst])                      \n\t"  /* store odd 1 */
-
-        "addqh_r.w        %[tp1],         %[tp1],         %[tp2]         \n\t"  /* average even 2 */
-        "sb               %[tp1],         2(%[dst])                      \n\t"  /* store even 2 */
-
-        "addqh_r.w        %[p2],          %[p2],          %[n2]          \n\t"  /* average odd 2 */
-        "sb               %[p2],          3(%[dst])                      \n\t"  /* store odd 2 */
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
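
Editor's note: as with the vertical kernels, the interleaved even/odd pixel chains above evaluate one row of an 8-tap horizontal filter followed by a rounding average with dst; the balign instructions shift the loaded byte window right by one pixel so the odd outputs can reuse the same loads. A scalar reference for one 4-pixel row under the same rounding conventions (src is assumed to point at the leftmost tap):

    static void avg_horiz4_row(const unsigned char *src, unsigned char *dst,
                               const short *filter_x /* 8 taps */) {
      int x, k;
      for (x = 0; x < 4; ++x) {
        int sum = 64;  /* rounding term for the >> 7 */
        for (k = 0; k < 8; ++k)
          sum += src[x + k] * filter_x[k];
        sum >>= 7;
        if (sum < 0) sum = 0;
        if (sum > 255) sum = 255;  /* the cropTbl clamp */
        dst[x] = (unsigned char)((dst[x] + sum + 1) >> 1);
      }
    }
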
-static void convolve_avg_horiz_8_dspr2(const uint8_t *src,
-                                       int32_t src_stride,
-                                       uint8_t *dst,
-                                       int32_t dst_stride,
-                                       const int16_t *filter_x0,
-                                       int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t tn1, tn2, tn3;
-  uint32_t st0, st1;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "ulw              %[tn2],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-        "lbu              %[Temp2],       0(%[dst])                      \n\t"
-        "lbu              %[tn3],         2(%[dst])                      \n\t"
-
-        /* even 2. pixel */
-        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n1],          %[tn2]                         \n\t"
-        "ulw              %[tn1],         12(%[src])                     \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "preceu.ph.qbr    %[p2],          %[tn1]                         \n\t"
-        "lbux             %[st1],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,           %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac1,           31             \n\t"
-
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-        "addqh_r.w        %[tn3],         %[tn3],         %[st1]         \n\t"
-        "sb               %[Temp2],       0(%[dst])                      \n\t"
-        "sb               %[tn3],         2(%[dst])                      \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-
-        "balign           %[tn3],         %[tn1],         3              \n\t"
-        "balign           %[tn1],         %[tn2],         3              \n\t"
-        "balign           %[tn2],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-
-        "lbux             %[st0],         %[Temp1](%[cm])                \n\t"
-        "lbu              %[Temp2],       4(%[dst])                      \n\t"
-        "addqh_r.w        %[Temp2],       %[Temp2],       %[st0]         \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                           \n\t"
-        "mthi             $zero,          $ac1                           \n\t"
-        "sb               %[Temp2],       4(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tp1],         6(%[dst])                      \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn1]                         \n\t"
-        "preceu.ph.qbl    %[n1],          %[tn1]                         \n\t"
-        "lbux             %[st0],         %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac1,           31             \n\t"
-
-        "lbu              %[tp2],         1(%[dst])                      \n\t"
-        "lbu              %[tn2],         3(%[dst])                      \n\t"
-        "addqh_r.w        %[tp1],         %[tp1],         %[st0]         \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],         %[Temp2](%[cm])                \n\t"
-        "preceu.ph.qbr    %[p2],          %[tn3]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[n1],          %[vector4b]    \n\t"
-        "addqh_r.w        %[tp2],         %[tp2],         %[st1]         \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        "lbu              %[tn3],         5(%[dst])                      \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp2],         1(%[dst])                      \n\t"
-        "sb               %[tp1],         6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac2,           31             \n\t"
-
-        "lbu              %[tn1],         7(%[dst])                      \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                \n\t"
-        "addqh_r.w        %[tn2],         %[tn2],         %[p4]          \n\t"
-
-        "lbux             %[p2],          %[Temp2](%[cm])                \n\t"
-        "addqh_r.w        %[tn3],         %[tn3],         %[p2]          \n\t"
-
-        "lbux             %[n1],          %[Temp1](%[cm])                \n\t"
-        "addqh_r.w        %[tn1],         %[tn1],         %[n1]          \n\t"
-
-        /* store bytes */
-        "sb               %[tn2],         3(%[dst])                      \n\t"
-        "sb               %[tn3],         5(%[dst])                      \n\t"
-        "sb               %[tn1],         7(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
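
Editor's note: the lbux lookups in these kernels clamp through a table rather than with compares: cm points into the middle of a clip ramp, so the raw signed filter result can index it directly and any over- or undershoot lands in the saturated padding. A hedged sketch of such a table (the real vpx_ff_cropTbl lives in the dspr2 common code; the padding width here is an assumption):

    #include <stdint.h>

    #define PAD 512  /* assumed padding on each side of the 0..255 ramp */
    static uint8_t crop_storage[PAD + 256 + PAD];
    static const uint8_t *crop_tbl = crop_storage + PAD;  /* like cm above */

    static void init_crop_tbl(void) {
      int i;
      for (i = -PAD; i < 256 + PAD; ++i)
        crop_storage[i + PAD] = (uint8_t)(i < 0 ? 0 : (i > 255 ? 255 : i));
    }
    /* usage: pixel = crop_tbl[sum >> 7];  clamps without branches */
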
-static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride,
-                                        uint8_t *dst_ptr,
-                                        int32_t dst_stride,
-                                        const int16_t *filter_x0,
-                                        int32_t h,
-                                        int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
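
For reference, each even/odd slot in the kernels above computes one output pixel of an 8-tap horizontal filter followed by a rounding average with the destination. A scalar sketch of that per-pixel work, assuming libvpx's FILTER_BITS == 7 convention (the helper name is illustrative, not part of this patch):

static uint8_t convolve_avg_one_pixel(const uint8_t *src,
                                      const int16_t *filter,
                                      uint8_t dst_pixel) {
  int k, sum = 0;
  for (k = 0; k < 8; ++k)          /* 8-tap horizontal filter */
    sum += src[k] * filter[k];
  sum = (sum + 64) >> 7;           /* round; the +64 is vector_64 above */
  if (sum < 0) sum = 0;            /* the vpx_ff_cropTbl lookup (lbux) */
  if (sum > 255) sum = 255;        /* performs this clamp */
  return (uint8_t)((dst_pixel + sum + 1) >> 1);  /* addqh_r.w rounding avg */
}

The assembly pipelines eight such pixels across the three DSP accumulators so that loads, multiply-accumulates, clamps, and averaged stores overlap.
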
-
-static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr,
-                                        int32_t src_stride,
-                                        uint8_t *dst_ptr,
-                                        int32_t dst_stride,
-                                        const int16_t *filter_x0,
-                                        int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-          "lbu              %[st2],       0(%[dst])                    \n\t" /* load even 1 from dst */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          "lbu              %[qload3],    2(%[dst])                    \n\t" /* load even 2 from dst */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st2],       0(%[dst])                    \n\t" /* store even 1 to dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st2]       \n\t" /* average even 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    2(%[dst])                    \n\t" /* store even 2 to dst */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "lbu              %[qload3],    4(%[dst])                    \n\t" /* load even 3 from dst */
-          "lbu              %[qload1],    6(%[dst])                    \n\t" /* load even 4 from dst */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload3],    4(%[dst])                    \n\t" /* store even 3 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average even 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[qload1],    6(%[dst])                    \n\t" /* store even 4 to dst */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "lbu              %[qload2],    8(%[dst])                    \n\t" /* load even 5 from dst */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    8(%[dst])                    \n\t" /* store even 5 to dst */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "lbu              %[qload3],    10(%[dst])                   \n\t" /* load even 6 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          "lbu              %[st2],       12(%[dst])                   \n\t" /* load even 7 from dst */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average even 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[qload3],    10(%[dst])                   \n\t" /* store even 6 to dst */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                   \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          "addqh_r.w        %[st2],       %[st2],         %[st1]       \n\t" /* average even 7 */
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       12(%[dst])                   \n\t" /* store even 7 to dst */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "lbu              %[qload2],    14(%[dst])                   \n\t" /* load even 8 from dst */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          "lbu              %[st1],       1(%[dst])                    \n\t" /* load odd 1 from dst */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average even 8 */
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload2],    14(%[dst])                   \n\t" /* store even 8 to dst */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "lbu              %[qload3],    3(%[dst])                    \n\t" /* load odd 2 from dst */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st3],       %[st3],         %[st1]       \n\t" /* average odd 1 */
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "sb               %[st3],       1(%[dst])                    \n\t" /* store odd 1 to dst */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload3],    %[qload3],      %[st1]       \n\t" /* average odd 2 */
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[qload3],    3(%[dst])                    \n\t" /* store odd 2 to dst */
-          "lbu              %[qload1],    5(%[dst])                    \n\t" /* load odd 3 from dst */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          "lbu              %[st1],       7(%[dst])                    \n\t" /* load odd 4 from dst */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st2]       \n\t" /* average odd 3 */
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[qload1],    5(%[dst])                    \n\t" /* store odd 3 to dst */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          "lbu              %[qload1],    9(%[dst])                    \n\t" /* load odd 5 from dst */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "addqh_r.w        %[st1],       %[st1],         %[st3]       \n\t" /* average odd 4 */
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       7(%[dst])                    \n\t" /* store odd 4 to dst */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 5 */
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[qload1],    9(%[dst])                    \n\t" /* store odd 5 to dst */
-          "lbu              %[qload2],    11(%[dst])                   \n\t" /* load odd 6 from dst */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          "lbu              %[qload3],    13(%[dst])                   \n\t" /* load odd 7 from dst */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbu              %[qload1],    15(%[dst])                   \n\t" /* load odd 8 from dst */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "addqh_r.w        %[qload2],    %[qload2],      %[st2]       \n\t" /* average odd 6 */
-
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "addqh_r.w        %[qload3],    %[qload3],      %[st3]       \n\t" /* average odd 7 */
-
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-          "addqh_r.w        %[qload1],    %[qload1],      %[st1]       \n\t" /* average odd 8 */
-
-          "sb               %[qload2],    11(%[dst])                   \n\t" /* store odd 6 to dst */
-          "sb               %[qload3],    13(%[dst])                   \n\t" /* store odd 7 to dst */
-          "sb               %[qload1],    15(%[dst])                   \n\t" /* store odd 8 to dst */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [qload3] "=&r" (qload3), [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                                   uint8_t *dst, ptrdiff_t dst_stride,
-                                   const int16_t *filter_x, int x_step_q4,
-                                   const int16_t *filter_y, int y_step_q4,
-                                   int w, int h) {
-  if (((const int32_t *)filter_x)[1] == 0x800000) {
-    vpx_convolve_avg(src, src_stride,
-                     dst, dst_stride,
-                     filter_x, x_step_q4,
-                     filter_y, y_step_q4,
-                     w, h);
-  } else if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_avg_horiz_dspr2(src, src_stride,
-                                  dst, dst_stride,
-                                  filter_x, x_step_q4,
-                                  filter_y, y_step_q4,
-                                  w, h);
-  } else {
-    if (16 == x_step_q4) {
-      uint32_t pos = 38;
-
-      src -= 3;
-
-      /* bit position for extract from acc */
-      __asm__ __volatile__ (
-        "wrdsp      %[pos],     1           \n\t"
-        :
-        : [pos] "r" (pos)
-      );
-
-      /* prefetch data to cache memory */
-      prefetch_load(src);
-      prefetch_load(src + 32);
-      prefetch_store(dst);
-
-      switch (w) {
-        case 4:
-          convolve_avg_horiz_4_dspr2(src, src_stride,
-                                     dst, dst_stride,
-                                     filter_x, h);
-          break;
-        case 8:
-          convolve_avg_horiz_8_dspr2(src, src_stride,
-                                     dst, dst_stride,
-                                     filter_x, h);
-          break;
-        case 16:
-          convolve_avg_horiz_16_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_x, h, 1);
-          break;
-        case 32:
-          convolve_avg_horiz_16_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_x, h, 2);
-          break;
-        case 64:
-          prefetch_load(src + 64);
-          prefetch_store(dst + 32);
-
-          convolve_avg_horiz_64_dspr2(src, src_stride,
-                                      dst, dst_stride,
-                                      filter_x, h);
-          break;
-        default:
-          vpx_convolve8_avg_horiz_c(src + 3, src_stride,
-                                    dst, dst_stride,
-                                    filter_x, x_step_q4,
-                                    filter_y, y_step_q4,
-                                    w, h);
-          break;
-      }
-    } else {
-      vpx_convolve8_avg_horiz_c(src, src_stride,
-                                dst, dst_stride,
-                                filter_x, x_step_q4,
-                                filter_y, y_step_q4,
-                                w, h);
-    }
-  }
-}
-#endif
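
The magic constants in the dispatch above decode the packed filter taps: two adjacent int16_t taps are read as one int32_t, so on the little-endian targets this code assumes, word [1] equal to 0x800000 means taps 2..3 are {0, 128}, which in libvpx's filter tables only occurs for the identity filter (handled by vpx_convolve_avg), while word [0] equal to 0 means the two outermost taps are zero, the signature of the bilinear filters served by the 2-tap path. A sketch of that decoding (helper names are mine):

#include <stdint.h>

/* Mirrors the pointer cast the source uses; on little-endian MIPS,
 * ((const int32_t *)f)[1] packs f[2] (low half) and f[3] (high half). */
static int is_identity_filter(const int16_t *f) {
  return ((const int32_t *)f)[1] == 0x800000;  /* f[2] == 0 && f[3] == 128 */
}
static int is_two_tap_filter(const int16_t *f) {
  return ((const int32_t *)f)[0] == 0;         /* f[0] == 0 && f[1] == 0 */
}
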
--- a/vpx_dsp/mips/vpx_convolve8_dspr2.c
+++ /dev/null
@@ -1,1282 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_dsp/vpx_filter.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-uint8_t vpx_ff_cropTbl_a[256 + 2 * CROP_WIDTH];
-uint8_t *vpx_ff_cropTbl;
-
-void vpx_dsputil_static_init(void) {
-  int i;
-
-  for (i = 0; i < 256; i++) vpx_ff_cropTbl_a[i + CROP_WIDTH] = i;
-
-  for (i = 0; i < CROP_WIDTH; i++) {
-    vpx_ff_cropTbl_a[i] = 0;
-    vpx_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255;
-  }
-
-  vpx_ff_cropTbl = &vpx_ff_cropTbl_a[CROP_WIDTH];
-}
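
The table built here turns clamping into a single lookup: vpx_ff_cropTbl points CROP_WIDTH bytes into the backing array, so it can be indexed with any value in [-CROP_WIDTH, 255 + CROP_WIDTH] and returns that value clipped to [0, 255]. This is what the lbux instructions in the kernels rely on. A sketch of the invariant, assuming vpx_dsputil_static_init() has run:

static void check_crop_tbl(void) {
  int v;
  /* every in-range index yields the saturated 8-bit value */
  for (v = -CROP_WIDTH; v < 256 + CROP_WIDTH; ++v)
    assert(vpx_ff_cropTbl[v] == (v < 0 ? 0 : (v > 255 ? 255 : v)));
}
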
-
-static void convolve_horiz_4_transposed_dspr2(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
-                                              int32_t dst_stride,
-                                              const int16_t *filter_x0,
-                                              int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint8_t *dst_ptr;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4;
-  uint32_t tn1, tn2;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    dst_ptr = dst;
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],         0(%[src])                      \n\t"
-        "ulw              %[tp2],         4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "ulw              %[tn2],         8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],       $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn2]                         \n\t"
-        "balign           %[tn1],         %[tn2],         3              \n\t"
-        "balign           %[tn2],         %[tp2],         3              \n\t"
-        "balign           %[tp2],         %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],       $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],         %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac3                           \n\t"
-        "mthi             $zero,          $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],          %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[p4],          %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],       $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],         %[Temp3](%[cm])                \n\t"
-        "mtlo             %[vector4a],    $ac2                           \n\t"
-        "mthi             $zero,          $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],          %[tn1]                         \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp4],       $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[tn1],         %[Temp2](%[cm])                \n\t"
-        "lbux             %[p2],          %[Temp4](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[tn1],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[tp2],         0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        "sb               %[p2],          0(%[dst_ptr])                  \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_stride]  \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4),
-          [dst_ptr] "+r" (dst_ptr)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
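
The "transposed" variants filter horizontally but store each output pixel down a column: dst_ptr advances by dst_stride per output pixel and dst by 1 per input row, so the horizontal pass lands in transposed order. The 8-wide version below additionally splits even and odd outputs between dst_ptr and odd_dst, each stepping dst_pitch_2 = 2 * dst_stride. A scalar sketch of the write pattern (the function name is illustrative):

static void convolve_horiz_transposed_sketch(const uint8_t *src,
                                             int src_stride, uint8_t *dst,
                                             int dst_stride,
                                             const int16_t *filter,
                                             int w, int h) {
  int x, y, k;
  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 0;
      for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k];
      sum = (sum + 64) >> 7;
      if (sum < 0) sum = 0;
      if (sum > 255) sum = 255;
      dst[x * dst_stride] = (uint8_t)sum;  /* row y, pixel x -> column store */
    }
    src += src_stride;
    dst += 1;  /* next input row fills the next entry of each column */
  }
}
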
-
-static void convolve_horiz_8_transposed_dspr2(const uint8_t *src,
-                                              int32_t src_stride,
-                                              uint8_t *dst,
-                                              int32_t dst_stride,
-                                              const int16_t *filter_x0,
-                                              int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint8_t *dst_ptr;
-  uint32_t vector4a = 64;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2, tp3;
-  uint32_t p1, p2, p3, p4, n1;
-  uint8_t *odd_dst;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-
-    dst_ptr = dst;
-    odd_dst = (dst_ptr + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp2],         0(%[src])                       \n\t"
-        "ulw              %[tp1],         4(%[src])                       \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp2]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp1]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp1]                          \n\t"
-        "ulw              %[tp3],         8(%[src])                       \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]     \n\t"
-        "extp             %[Temp1],       $ac3,           31              \n\t"
-
-        /* even 2. pixel */
-        "preceu.ph.qbr    %[p1],          %[tp3]                          \n\t"
-        "preceu.ph.qbl    %[n1],          %[tp3]                          \n\t"
-        "ulw              %[tp2],         12(%[src])                      \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p3],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector4b]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[Temp2],       %[Temp1](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "preceu.ph.qbr    %[p2],          %[tp2]                          \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector3b]     \n\t"
-        "lbux             %[tp3],         %[Temp3](%[cm])                 \n\t"
-        "dpa.w.ph         $ac1,           %[n1],          %[vector4b]     \n\t"
-        "extp             %[p3],          $ac1,           31              \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "sb               %[Temp2],       0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "sb               %[tp3],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-
-        "ulw              %[tp1],         1(%[src])                       \n\t"
-        "ulw              %[tp3],         5(%[src])                       \n\t"
-
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]     \n\t"
-        "extp             %[Temp3],       $ac2,           31              \n\t"
-
-        "lbux             %[tp2],         %[p3](%[cm])                    \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a],    $ac1                            \n\t"
-        "mthi             $zero,          $ac1                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp1]                          \n\t"
-        "preceu.ph.qbl    %[p2],          %[tp1]                          \n\t"
-        "preceu.ph.qbr    %[p3],          %[tp3]                          \n\t"
-        "preceu.ph.qbl    %[p4],          %[tp3]                          \n\t"
-        "sb               %[tp2],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "ulw              %[tp2],         9(%[src])                       \n\t"
-
-        "dpa.w.ph         $ac3,           %[p1],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p2],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector4b]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp1],         %[Temp3](%[cm])                 \n\t"
-        "mtlo             %[vector4a],    $ac3                            \n\t"
-        "mthi             $zero,          $ac3                            \n\t"
-        "mtlo             %[vector4a],    $ac2                            \n\t"
-        "mthi             $zero,          $ac2                            \n\t"
-        "preceu.ph.qbr    %[p1],          %[tp2]                          \n\t"
-        "preceu.ph.qbl    %[n1],          %[tp2]                          \n\t"
-        "ulw              %[Temp1],       13(%[src])                      \n\t"
-        "dpa.w.ph         $ac1,           %[p2],          %[vector1b]     \n\t"
-        "sb               %[tp1],         0(%[dst_ptr])                   \n\t"
-        "addu             %[dst_ptr],     %[dst_ptr],     %[dst_pitch_2]  \n\t"
-        "dpa.w.ph         $ac1,           %[p3],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac1,           %[p4],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac1,           %[p1],          %[vector4b]     \n\t"
-        "extp             %[Temp3],       $ac1,           31              \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[tp3],         %[Temp2](%[cm])                 \n\t"
-        "preceu.ph.qbr    %[p2],          %[Temp1]                        \n\t"
-        "dpa.w.ph         $ac3,           %[p3],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p4],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac3,           %[p1],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac3,           %[n1],          %[vector4b]     \n\t"
-        "extp             %[Temp2],       $ac3,           31              \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[tp3],         0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-        "dpa.w.ph         $ac2,           %[p4],          %[vector1b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p1],          %[vector2b]     \n\t"
-        "dpa.w.ph         $ac2,           %[n1],          %[vector3b]     \n\t"
-        "dpa.w.ph         $ac2,           %[p2],          %[vector4b]     \n\t"
-        "extp             %[Temp1],       $ac2,           31              \n\t"
-
-        /* clamp */
-        "lbux             %[p4],          %[Temp3](%[cm])                 \n\t"
-        "lbux             %[p2],          %[Temp2](%[cm])                 \n\t"
-        "lbux             %[n1],          %[Temp1](%[cm])                 \n\t"
-
-        /* store bytes */
-        "sb               %[p4],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[p2],          0(%[odd_dst])                   \n\t"
-        "addu             %[odd_dst],     %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-        "sb               %[n1],          0(%[odd_dst])                   \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-          [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a), [cm] "r" (cm),
-          [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += 1;
-  }
-}
-
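-/* 8-tap horizontal convolution over 16-pixel groups, written transposed:
-   each output byte lands one dst_stride apart and dst advances by one byte
-   per input row, so the vertical pass can reuse the horizontal filters. */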
-static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr,
-                                               int32_t src_stride,
-                                               uint8_t *dst_ptr,
-                                               int32_t dst_stride,
-                                               const int16_t *filter_x0,
-                                               int32_t h,
-                                               int32_t count) {
-  int32_t c, y;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t  filter12, filter34, filter56, filter78;
-  int32_t  Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t  *odd_dst;
-
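-  /* each 32-bit word packs two adjacent 16-bit taps, matching the
-     two-multiply dpa.w.ph accumulate steps in the asm below */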
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        16(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        17(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64), [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
-
-      src += 16;
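-      /* step to the next 16-column group, which starts 16 rows further
-         down the transposed destination */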
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-
-    dst_ptr += 1;
-  }
-}
-
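-/* 64-wide variant of the transposed horizontal convolution: the same
-   inner loop as the 16-pixel version, fixed at four 16-pixel groups,
-   with one extra prefetch to cover the wider row. */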
-static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr,
-                                               int32_t src_stride,
-                                               uint8_t *dst_ptr,
-                                               int32_t dst_stride,
-                                               const int16_t *filter_x0,
-                                               int32_t h) {
-  int32_t c, y;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t  filter12, filter34, filter56, filter78;
-  int32_t  Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-  uint32_t dst_pitch_2 = (dst_stride << 1);
-  uint8_t  *odd_dst;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-
-    src = src_ptr;
-    dst = dst_ptr;
-
-    odd_dst = (dst + dst_stride);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],        0(%[src])                       \n\t"
-          "ulw              %[qload2],        4(%[src])                       \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 1 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 2 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "ulw              %[qload2],        8(%[src])                       \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter78]     \n\t" /* even 1 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 3 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "ulw              %[qload1],        12(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p2],          %[filter12]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter34]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter56]     \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter78]     \n\t" /* even 1 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 1 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 4 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 1 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]             \n\t"
-          "dpa.w.ph         $ac3,             %[p3],          %[filter12]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter34]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter56]     \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter78]     \n\t" /* even 3 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 3 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 5 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 2 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        16(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p4],          %[filter12]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter34]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter56]     \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter78]     \n\t" /* even 4 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 4 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* even 6 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 3 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter12]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter34]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter56]     \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter78]     \n\t" /* even 5 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 5 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* even 7 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 4 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        20(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p5],          %[filter12]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* even 6 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* even 6 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* even 8 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 5 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* even 7 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* even 7 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 1 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* even 8 */
-          "sb               %[st3],           0(%[dst])                       \n\t" /* even 6 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* even 8 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* even 8 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],        1(%[src])                       \n\t"
-          "ulw              %[qload2],        5(%[src])                       \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 2 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload1]                       \n\t"
-          "preceu.ph.qbl    %[p2],            %[qload1]                       \n\t"
-          "preceu.ph.qbr    %[p3],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st1],           0(%[dst])                       \n\t" /* even 7 */
-          "addu             %[dst],           %[dst],         %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        9(%[src])                       \n\t"
-          "dpa.w.ph         $ac3,             %[p1],          %[filter12]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter34]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter56]     \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter78]     \n\t" /* odd 1 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 1 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 3 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p1],            %[qload2]                       \n\t"
-          "preceu.ph.qbl    %[p5],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[dst])                       \n\t" /* even 8 */
-          "ulw              %[qload1],        13(%[src])                      \n\t"
-          "dpa.w.ph         $ac1,             %[p2],          %[filter12]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter34]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter56]     \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter78]     \n\t" /* odd 2 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 2 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 4 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbr    %[p2],            %[qload1]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 1 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac2,             %[p3],          %[filter12]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter34]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p1],          %[filter56]     \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,             %[p5],          %[filter78]     \n\t" /* odd 3 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 3 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 5 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbl    %[p3],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 2 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload2],        17(%[src])                      \n\t"
-          "dpa.w.ph         $ac3,             %[p4],          %[filter12]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter34]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p5],          %[filter56]     \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,             %[p2],          %[filter78]     \n\t" /* odd 4 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 4 */
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64],     $ac2                            \n\t" /* odd 6 */
-          "mthi             $zero,            $ac2                            \n\t"
-          "preceu.ph.qbr    %[p4],            %[qload2]                       \n\t"
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 3 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac1,             %[p1],          %[filter12]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter34]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p2],          %[filter56]     \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter78]     \n\t" /* odd 5 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 5 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64],     $ac3                            \n\t" /* odd 7 */
-          "mthi             $zero,            $ac3                            \n\t"
-          "preceu.ph.qbl    %[p1],            %[qload2]                       \n\t"
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 4 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "ulw              %[qload1],        21(%[src])                      \n\t"
-          "dpa.w.ph         $ac2,             %[p5],          %[filter12]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p2],          %[filter34]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p3],          %[filter56]     \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,             %[p4],          %[filter78]     \n\t" /* odd 6 */
-          "extp             %[Temp2],         $ac2,           31              \n\t" /* odd 6 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64],     $ac1                            \n\t" /* odd 8 */
-          "mthi             $zero,            $ac1                            \n\t"
-          "preceu.ph.qbr    %[p5],            %[qload1]                       \n\t"
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 5 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-          "dpa.w.ph         $ac3,             %[p2],          %[filter12]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p3],          %[filter34]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p4],          %[filter56]     \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,             %[p1],          %[filter78]     \n\t" /* odd 7 */
-          "extp             %[Temp3],         $ac3,           31              \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,             %[p3],          %[filter12]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p4],          %[filter34]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p1],          %[filter56]     \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,             %[p5],          %[filter78]     \n\t" /* odd 8 */
-          "extp             %[Temp1],         $ac1,           31              \n\t" /* odd 8 */
-
-          "lbux             %[st2],           %[Temp2](%[cm])                 \n\t" /* odd 6 */
-          "lbux             %[st3],           %[Temp3](%[cm])                 \n\t" /* odd 7 */
-          "lbux             %[st1],           %[Temp1](%[cm])                 \n\t" /* odd 8 */
-
-          "sb               %[st2],           0(%[odd_dst])                   \n\t" /* odd 6 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st3],           0(%[odd_dst])                   \n\t" /* odd 7 */
-          "addu             %[odd_dst],       %[odd_dst],     %[dst_pitch_2]  \n\t"
-
-          "sb               %[st1],           0(%[odd_dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3),
-            [dst] "+r" (dst), [odd_dst] "+r" (odd_dst)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64), [cm] "r" (cm),
-            [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2)
-      );
-
-      src += 16;
-      dst = (dst_ptr + ((c + 1) * 16 * dst_stride));
-      odd_dst = (dst + dst_stride);
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-
-    dst_ptr += 1;
-  }
-}
-
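-/* plain C fallback: 8-tap horizontal convolution with transposed output,
-   used for widths without a DSPr2-specialized path */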
-void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter, int w, int h) {
-  int x, y, k;
-
-  for (y = 0; y < h; ++y) {
-    for (x = 0; x < w; ++x) {
-      int sum = 0;
-
-      for (k = 0; k < 8; ++k)
-        sum += src[x + k] * filter[k];
-
-      dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS));
-    }
-
-    src += src_stride;
-    dst += 1;
-  }
-}
-
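-/* transposed copy, used when a filter is the identity (center tap 128) */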
-void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride,
-                           uint8_t *dst, ptrdiff_t dst_stride,
-                           int w, int h) {
-  int x, y;
-
-  for (y = 0; y < h; ++y) {
-    for (x = 0; x < w; ++x) {
-      dst[x * dst_stride] = src[x];
-    }
-
-    src += src_stride;
-    dst += 1;
-  }
-}
-
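-/* 8-tap 2-D convolution as two transposed horizontal passes: the first
-   pass filters horizontally into a transposed intermediate buffer, the
-   second runs the vertical filter as another horizontal pass and
-   transposes the result back into dst. */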
-void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                         uint8_t *dst, ptrdiff_t dst_stride,
-                         const int16_t *filter_x, int x_step_q4,
-                         const int16_t *filter_y, int y_step_q4,
-                         int w, int h) {
-  DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]);
-  int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7;
-  uint32_t pos = 38;
-
-  /* set the accumulator extract position for extp: bits 38..7, i.e. the
-     32-bit result above the 7 fractional filter bits */
-  __asm__ __volatile__ (
-    "wrdsp      %[pos],     1           \n\t"
-    :
-    : [pos] "r" (pos)
-  );
-
-  if (intermediate_height < h)
-    intermediate_height = h;
-
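-  /* only unit steps (16/16) are accelerated; scaled convolutions fall
-     back to the C implementation */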
-  if (x_step_q4 != 16 || y_step_q4 != 16)
-    return vpx_convolve8_c(src, src_stride,
-                           dst, dst_stride,
-                           filter_x, x_step_q4,
-                           filter_y, y_step_q4,
-                           w, h);
-
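-  /* taps {f2, f3} packed as 0x800000 means f3 == 128 and f2 == 0 (assuming
-     little-endian packing): both filters are the identity, so just copy */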
-  if ((((const int32_t *)filter_x)[1] == 0x800000)
-      && (((const int32_t *)filter_y)[1] == 0x800000))
-    return vpx_convolve_copy(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
-
-  /* first pass: horizontal filter (copy / 2-tap / 8-tap) into the transposed temp */
-  if (filter_x[3] == 0x80) {
-    copy_horiz_transposed(src - src_stride * 3, src_stride,
-                          temp, intermediate_height,
-                          w, intermediate_height);
-  } else if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_dspr2(src - src_stride * 3, src_stride,
-                        temp, intermediate_height,
-                        filter_x,
-                        w, intermediate_height);
-  } else {
-    src -= (src_stride * 3 + 3);
-
-    /* prefetch data to cache memory */
-    prefetch_load(src);
-    prefetch_load(src + 32);
-
-    switch (w) {
-      case 4:
-        convolve_horiz_4_transposed_dspr2(src, src_stride,
-                                          temp, intermediate_height,
-                                          filter_x, intermediate_height);
-        break;
-      case 8:
-        convolve_horiz_8_transposed_dspr2(src, src_stride,
-                                          temp, intermediate_height,
-                                          filter_x, intermediate_height);
-        break;
-      case 16:
-      case 32:
-        convolve_horiz_16_transposed_dspr2(src, src_stride,
-                                           temp, intermediate_height,
-                                           filter_x, intermediate_height,
-                                           (w/16));
-        break;
-      case 64:
-        prefetch_load(src + 32);
-        convolve_horiz_64_transposed_dspr2(src, src_stride,
-                                           temp, intermediate_height,
-                                           filter_x, intermediate_height);
-        break;
-      default:
-        convolve_horiz_transposed(src, src_stride,
-                                  temp, intermediate_height,
-                                  filter_x, w, intermediate_height);
-        break;
-    }
-  }
-
-  /* second pass: apply the vertical filter as a transposed horizontal pass into dst */
-  if (filter_y[3] == 0x80) {
-    copy_horiz_transposed(temp + 3, intermediate_height,
-                          dst, dst_stride,
-                          h, w);
-  } else if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_dspr2(temp + 3, intermediate_height,
-                        dst, dst_stride,
-                        filter_y,
-                        h, w);
-  } else {
-    switch (h) {
-      case 4:
-        convolve_horiz_4_transposed_dspr2(temp, intermediate_height,
-                                          dst, dst_stride,
-                                          filter_y, w);
-        break;
-      case 8:
-        convolve_horiz_8_transposed_dspr2(temp, intermediate_height,
-                                          dst, dst_stride,
-                                          filter_y, w);
-        break;
-      case 16:
-      case 32:
-        convolve_horiz_16_transposed_dspr2(temp, intermediate_height,
-                                           dst, dst_stride,
-                                           filter_y, w, (h/16));
-        break;
-      case 64:
-        convolve_horiz_64_transposed_dspr2(temp, intermediate_height,
-                                           dst, dst_stride,
-                                           filter_y, w);
-        break;
-      default:
-        convolve_horiz_transposed(temp, intermediate_height,
-                                  dst, dst_stride,
-                                  filter_y, h, w);
-        break;
-    }
-  }
-}
-
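-/* width-specialized copy: unrolled unaligned word loads and word stores
-   per row, with a byte-copy fallback for other widths */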
-void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                             uint8_t *dst, ptrdiff_t dst_stride,
-                             const int16_t *filter_x, int filter_x_stride,
-                             const int16_t *filter_y, int filter_y_stride,
-                             int w, int h) {
-  int x, y;
-
-  /* prefetch data to cache memory */
-  prefetch_load(src);
-  prefetch_load(src + 32);
-  prefetch_store(dst);
-
-  switch (w) {
-    case 4:
-      {
-      uint32_t tp1;
-
-      /* 1 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         (%[src])      \n\t"
-            "sw               %[tp1],         (%[dst])      \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      }
-      break;
-    case 8:
-      {
-      uint32_t tp1, tp2;
-
-      /* 2 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      }
-      break;
-    case 16:
-      {
-      uint32_t tp1, tp2, tp3, tp4;
-
-      /* 4 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      }
-      break;
-    case 32:
-      {
-      uint32_t tp1, tp2, tp3, tp4;
-      uint32_t tp5, tp6, tp7, tp8;
-
-      /* 8 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_store(dst + dst_stride);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-            "ulw              %[tp5],         16(%[src])     \n\t"
-            "ulw              %[tp6],         20(%[src])     \n\t"
-            "ulw              %[tp7],         24(%[src])     \n\t"
-            "ulw              %[tp8],         28(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
-              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      }
-      break;
-    case 64:
-      {
-      uint32_t tp1, tp2, tp3, tp4;
-      uint32_t tp5, tp6, tp7, tp8;
-
-      prefetch_load(src + 64);
-      prefetch_store(dst + 32);
-
-      /* 16 word storage */
-      for (y = h; y--; ) {
-        prefetch_load(src + src_stride);
-        prefetch_load(src + src_stride + 32);
-        prefetch_load(src + src_stride + 64);
-        prefetch_store(dst + dst_stride);
-        prefetch_store(dst + dst_stride + 32);
-
-        __asm__ __volatile__ (
-            "ulw              %[tp1],         0(%[src])      \n\t"
-            "ulw              %[tp2],         4(%[src])      \n\t"
-            "ulw              %[tp3],         8(%[src])      \n\t"
-            "ulw              %[tp4],         12(%[src])     \n\t"
-            "ulw              %[tp5],         16(%[src])     \n\t"
-            "ulw              %[tp6],         20(%[src])     \n\t"
-            "ulw              %[tp7],         24(%[src])     \n\t"
-            "ulw              %[tp8],         28(%[src])     \n\t"
-
-            "sw               %[tp1],         0(%[dst])      \n\t"  /* store */
-            "sw               %[tp2],         4(%[dst])      \n\t"  /* store */
-            "sw               %[tp3],         8(%[dst])      \n\t"  /* store */
-            "sw               %[tp4],         12(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         16(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         20(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         24(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         28(%[dst])     \n\t"  /* store */
-
-            "ulw              %[tp1],         32(%[src])     \n\t"
-            "ulw              %[tp2],         36(%[src])     \n\t"
-            "ulw              %[tp3],         40(%[src])     \n\t"
-            "ulw              %[tp4],         44(%[src])     \n\t"
-            "ulw              %[tp5],         48(%[src])     \n\t"
-            "ulw              %[tp6],         52(%[src])     \n\t"
-            "ulw              %[tp7],         56(%[src])     \n\t"
-            "ulw              %[tp8],         60(%[src])     \n\t"
-
-            "sw               %[tp1],         32(%[dst])     \n\t"  /* store */
-            "sw               %[tp2],         36(%[dst])     \n\t"  /* store */
-            "sw               %[tp3],         40(%[dst])     \n\t"  /* store */
-            "sw               %[tp4],         44(%[dst])     \n\t"  /* store */
-            "sw               %[tp5],         48(%[dst])     \n\t"  /* store */
-            "sw               %[tp6],         52(%[dst])     \n\t"  /* store */
-            "sw               %[tp7],         56(%[dst])     \n\t"  /* store */
-            "sw               %[tp8],         60(%[dst])     \n\t"  /* store */
-
-            : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-              [tp3] "=&r" (tp3), [tp4] "=&r" (tp4),
-              [tp5] "=&r" (tp5), [tp6] "=&r" (tp6),
-              [tp7] "=&r" (tp7), [tp8] "=&r" (tp8)
-            : [src] "r" (src), [dst] "r" (dst)
-        );
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      }
-      break;
-    default:
-      for (y = h; y--; ) {
-        for (x = 0; x < w; ++x) {
-          dst[x] = src[x];
-        }
-
-        src += src_stride;
-        dst += dst_stride;
-      }
-      break;
-  }
-}
-#endif
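
Every width case in the copy kernel above performs the same row-by-row byte copy; the ulw/sw pairs merely unroll it into unaligned word transfers with one-row-ahead prefetch. A minimal scalar sketch of the equivalent behavior (names are illustrative, not from the tree):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Scalar equivalent of the dspr2 block-copy kernel above (a sketch):
     * each output row is a straight copy of the source row; the width
     * cases 4..64 only change w and the amount of unrolling. */
    static void convolve_copy_sketch(const uint8_t *src, ptrdiff_t src_stride,
                                     uint8_t *dst, ptrdiff_t dst_stride,
                                     int w, int h) {
      int y;
      for (y = 0; y < h; ++y) {
        memcpy(dst, src, w);
        src += src_stride;
        dst += dst_stride;
      }
    }
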
--- a/vpx_dsp/mips/vpx_convolve8_horiz_dspr2.c
+++ /dev/null
@@ -1,921 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_dsp/vpx_filter.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_horiz_4_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_x0,
-                                   int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3, Temp4;
-  uint32_t vector4a = 64;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4;
-  uint32_t n1, n2, n3, n4;
-  uint32_t tn1, tn2;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "ulw              %[tn2],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
-        "balign           %[tn1],      %[tn2],         3              \n\t"
-        "balign           %[tn2],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* odd 1. pixel */
-        "lbux             %[tp1],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "preceu.ph.qbr    %[n1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[n2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[n3],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n4],       %[tn2]                         \n\t"
-        "dpa.w.ph         $ac3,        %[n1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "lbux             %[tp2],      %[Temp3](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[n1],       %[tn1]                         \n\t"
-        "dpa.w.ph         $ac2,        %[n2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp4],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[tn1],      %[Temp2](%[cm])                \n\t"
-        "lbux             %[n2],       %[Temp4](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[tp1],      0(%[dst])                      \n\t"
-        "sb               %[tn1],      1(%[dst])                      \n\t"
-        "sb               %[tp2],      2(%[dst])                      \n\t"
-        "sb               %[n2],       3(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-          [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
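
Per output pixel, the assembly above computes the standard 8-tap FIR used throughout vpx_dsp: seed the accumulator with the rounding constant 64 (mtlo %[vector4a]), accumulate four tap-pair dot products (dpa.w.ph), extract with an effective 7-bit shift (extp ..., 31 under the wrdsp position set by the dispatcher), and clamp to [0, 255] through vpx_ff_cropTbl (lbux). A scalar sketch of that arithmetic, assuming src has already been rewound by 3 taps as the dispatcher's src -= 3 does, with FILTER_BITS == 7 as elsewhere in vpx_dsp:

    #include <stdint.h>

    /* Scalar model of one row of the 4-wide horizontal kernel (sketch).
     * filter holds the 8 taps; src[x] .. src[x + 7] feed output x. */
    static void convolve_horiz_4_sketch(const uint8_t *src, int src_stride,
                                        uint8_t *dst, int dst_stride,
                                        const int16_t *filter, int h) {
      int x, y, k;
      for (y = 0; y < h; ++y) {
        for (x = 0; x < 4; ++x) {
          int sum = 64;                     /* rounding constant (vector4a) */
          for (k = 0; k < 8; ++k)
            sum += src[x + k] * filter[k];  /* the dpa.w.ph accumulations */
          sum >>= 7;                        /* extp: FILTER_BITS == 7 */
          if (sum < 0) sum = 0;             /* lbux via vpx_ff_cropTbl */
          if (sum > 255) sum = 255;
          dst[x] = (uint8_t)sum;
        }
        src += src_stride;
        dst += dst_stride;
      }
    }

The 8-, 16-, and 64-wide kernels below compute the same expression; they differ only in how loads, accumulator reuse, and stores are software-pipelined.
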
-static void convolve_horiz_8_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_x0,
-                                   int32_t h) {
-  int32_t y;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t tp1, tp2;
-  uint32_t p1, p2, p3, p4, n1;
-  uint32_t tn1, tn2, tn3;
-  uint32_t st0, st1;
-
-  vector1b = ((const int32_t *)filter_x0)[0];
-  vector2b = ((const int32_t *)filter_x0)[1];
-  vector3b = ((const int32_t *)filter_x0)[2];
-  vector4b = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_load(src + src_stride);
-    prefetch_load(src + src_stride + 32);
-    prefetch_store(dst + dst_stride);
-
-    __asm__ __volatile__ (
-        "ulw              %[tp1],      0(%[src])                      \n\t"
-        "ulw              %[tp2],      4(%[src])                      \n\t"
-
-        /* even 1. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp1]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp1]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tp2]                         \n\t"
-        "ulw              %[tn2],      8(%[src])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac3,           31             \n\t"
-
-        /* even 2. pixel */
-        "preceu.ph.qbr    %[p1],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[n1],       %[tn2]                         \n\t"
-        "ulw              %[tn1],      12(%[src])                     \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        /* even 3. pixel */
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "preceu.ph.qbr    %[p2],       %[tn1]                         \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac1,           31             \n\t"
-
-        /* even 4. pixel */
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "sb               %[st0],      0(%[dst])                      \n\t"
-        "lbux             %[st1],      %[Temp3](%[cm])                \n\t"
-
-        "balign           %[tn3],      %[tn1],         3              \n\t"
-        "balign           %[tn1],      %[tn2],         3              \n\t"
-        "balign           %[tn2],      %[tp2],         3              \n\t"
-        "balign           %[tp2],      %[tp1],         3              \n\t"
-
-        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac2,           31             \n\t"
-
-        "lbux             %[st0],      %[Temp1](%[cm])                \n\t"
-
-        /* odd 1. pixel */
-        "mtlo             %[vector4a], $ac1                           \n\t"
-        "mthi             $zero,       $ac1                           \n\t"
-        "sb               %[st1],      2(%[dst])                      \n\t"
-        "preceu.ph.qbr    %[p1],       %[tp2]                         \n\t"
-        "preceu.ph.qbl    %[p2],       %[tp2]                         \n\t"
-        "preceu.ph.qbr    %[p3],       %[tn2]                         \n\t"
-        "preceu.ph.qbl    %[p4],       %[tn2]                         \n\t"
-        "sb               %[st0],      4(%[dst])                      \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p2],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 2. pixel */
-        "mtlo             %[vector4a], $ac3                           \n\t"
-        "mthi             $zero,       $ac3                           \n\t"
-        "mtlo             %[vector4a], $ac2                           \n\t"
-        "mthi             $zero,       $ac2                           \n\t"
-        "preceu.ph.qbr    %[p1],       %[tn1]                         \n\t"
-        "preceu.ph.qbl    %[n1],       %[tn1]                         \n\t"
-        "lbux             %[st0],      %[Temp3](%[cm])                \n\t"
-        "dpa.w.ph         $ac1,        %[p2],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p3],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p4],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac1,        %[p1],          %[vector4b]    \n\t"
-        "extp             %[Temp3],    $ac1,           31             \n\t"
-
-        /* odd 3. pixel */
-        "lbux             %[st1],      %[Temp2](%[cm])                \n\t"
-        "preceu.ph.qbr    %[p2],       %[tn3]                         \n\t"
-        "dpa.w.ph         $ac3,        %[p3],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p4],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac3,        %[p1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac3,        %[n1],          %[vector4b]    \n\t"
-        "extp             %[Temp2],    $ac3,           31             \n\t"
-
-        /* odd 4. pixel */
-        "sb               %[st1],      1(%[dst])                      \n\t"
-        "sb               %[st0],      6(%[dst])                      \n\t"
-        "dpa.w.ph         $ac2,        %[p4],          %[vector1b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p1],          %[vector2b]    \n\t"
-        "dpa.w.ph         $ac2,        %[n1],          %[vector3b]    \n\t"
-        "dpa.w.ph         $ac2,        %[p2],          %[vector4b]    \n\t"
-        "extp             %[Temp1],    $ac2,           31             \n\t"
-
-        /* clamp */
-        "lbux             %[p4],       %[Temp3](%[cm])                \n\t"
-        "lbux             %[p2],       %[Temp2](%[cm])                \n\t"
-        "lbux             %[n1],       %[Temp1](%[cm])                \n\t"
-
-        /* store bytes */
-        "sb               %[p4],       3(%[dst])                      \n\t"
-        "sb               %[p2],       5(%[dst])                      \n\t"
-        "sb               %[n1],       7(%[dst])                      \n\t"
-
-        : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2),
-          [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3),
-          [st0] "=&r" (st0), [st1] "=&r" (st1),
-          [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-          [n1] "=&r" (n1),
-          [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-        : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-          [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-          [vector4a] "r" (vector4a),
-          [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src)
-    );
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_horiz_16_dspr2(const uint8_t *src_ptr,
-                                    int32_t src_stride,
-                                    uint8_t *dst_ptr,
-                                    int32_t dst_stride,
-                                    const int16_t *filter_x0,
-                                    int32_t h,
-                                    int32_t count) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_store(dst_ptr + dst_stride);
-
-    for (c = 0; c < count; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst),
-            [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
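
In the 16-wide kernel above, outputs are produced in two interleaved passes per block of 16: an even pass fed by word loads at src + 0, 4, ..., 20 that writes dst[0], dst[2], ..., dst[14], and an odd pass that reloads the same neighborhood one byte later (ulw at 1, 5, ..., 21) to write dst[1], dst[3], ..., dst[15]. Three accumulators ($ac1..$ac3) are rotated so the extp/lbux/sb of one pixel overlaps the dpa.w.ph chain of the next; that pipelining is why the /* even N */ and /* odd N */ tags trail the section headers by a pixel or two. Schematically:

    /* one 16-byte block, per row (illustration only):
     *   even pass: ulw src+{0,4,8,12,16,20}  ->  sb dst+{0,2,4,...,14}
     *   odd  pass: ulw src+{1,5,9,13,17,21}  ->  sb dst+{1,3,5,...,15}
     */
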
-static void convolve_horiz_64_dspr2(const uint8_t *src_ptr,
-                                    int32_t src_stride,
-                                    uint8_t *dst_ptr,
-                                    int32_t dst_stride,
-                                    const int16_t *filter_x0,
-                                    int32_t h) {
-  int32_t y, c;
-  const uint8_t *src;
-  uint8_t *dst;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector_64 = 64;
-  int32_t filter12, filter34, filter56, filter78;
-  int32_t Temp1, Temp2, Temp3;
-  uint32_t qload1, qload2, qload3;
-  uint32_t p1, p2, p3, p4, p5;
-  uint32_t st1, st2, st3;
-
-  filter12 = ((const int32_t *)filter_x0)[0];
-  filter34 = ((const int32_t *)filter_x0)[1];
-  filter56 = ((const int32_t *)filter_x0)[2];
-  filter78 = ((const int32_t *)filter_x0)[3];
-
-  for (y = h; y--;) {
-    src = src_ptr;
-    dst = dst_ptr;
-
-    /* prefetch data to cache memory */
-    prefetch_load(src_ptr + src_stride);
-    prefetch_load(src_ptr + src_stride + 32);
-    prefetch_load(src_ptr + src_stride + 64);
-    prefetch_store(dst_ptr + dst_stride);
-    prefetch_store(dst_ptr + dst_stride + 32);
-
-    for (c = 0; c < 4; c++) {
-      __asm__ __volatile__ (
-          "ulw              %[qload1],    0(%[src])                    \n\t"
-          "ulw              %[qload2],    4(%[src])                    \n\t"
-
-          /* even 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 1 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 2 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "ulw              %[qload3],    8(%[src])                    \n\t"
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 1 */
-
-          /* even 2. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 3 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "ulw              %[qload1],    12(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[filter12]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter34]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter56]  \n\t" /* even 1 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter78]  \n\t" /* even 1 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 1 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 1 */
-
-          /* even 3. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 4 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st1],       0(%[dst])                    \n\t" /* even 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter12]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter34]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter56]  \n\t" /* even 3 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter78]  \n\t" /* even 3 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 3 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 1 */
-
-          /* even 4. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 5 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st2],       2(%[dst])                    \n\t" /* even 1 */
-          "ulw              %[qload2],    16(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p4],          %[filter12]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter34]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter56]  \n\t" /* even 4 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter78]  \n\t" /* even 4 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 4 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 3 */
-
-          /* even 5. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* even 6 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st3],       4(%[dst])                    \n\t" /* even 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter12]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter34]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter56]  \n\t" /* even 5 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter78]  \n\t" /* even 5 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 5 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 4 */
-
-          /* even 6. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* even 7 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st1],       6(%[dst])                    \n\t" /* even 4 */
-          "ulw              %[qload3],    20(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p5],          %[filter12]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* even 6 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* even 6 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* even 6 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 5 */
-
-          /* even 7. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* even 8 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       8(%[dst])                    \n\t" /* even 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* even 7 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* even 7 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* even 7 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* even 6 */
-
-          /* even 8. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 1 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* even 8 */
-          "sb               %[st3],       10(%[dst])                   \n\t" /* even 6 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* even 8 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* even 8 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* even 8 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* even 7 */
-
-          /* ODD pixels */
-          "ulw              %[qload1],    1(%[src])                    \n\t"
-          "ulw              %[qload2],    5(%[src])                    \n\t"
-
-          /* odd 1. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 2 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload1]                    \n\t"
-          "preceu.ph.qbl    %[p2],        %[qload1]                    \n\t"
-          "preceu.ph.qbr    %[p3],        %[qload2]                    \n\t"
-          "preceu.ph.qbl    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st1],       12(%[dst])                   \n\t" /* even 7 */
-          "ulw              %[qload3],    9(%[src])                    \n\t"
-          "dpa.w.ph         $ac3,         %[p1],          %[filter12]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter34]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter56]  \n\t" /* odd 1 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter78]  \n\t" /* odd 1 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 1 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* even 8 */
-
-          /* odd 2. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 3 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p1],        %[qload3]                    \n\t"
-          "preceu.ph.qbl    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st2],       14(%[dst])                   \n\t" /* even 8 */
-          "ulw              %[qload1],    13(%[src])                   \n\t"
-          "dpa.w.ph         $ac1,         %[p2],          %[filter12]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter34]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter56]  \n\t" /* odd 2 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter78]  \n\t" /* odd 2 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 2 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 1 */
-
-          /* odd 3. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 4 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbr    %[p2],        %[qload1]                    \n\t"
-          "sb               %[st3],       1(%[dst])                    \n\t" /* odd 1 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter12]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter34]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p1],          %[filter56]  \n\t" /* odd 3 */
-          "dpa.w.ph         $ac2,         %[p5],          %[filter78]  \n\t" /* odd 3 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 3 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 2 */
-
-          /* odd 4. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 5 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbl    %[p3],        %[qload1]                    \n\t"
-          "sb               %[st1],       3(%[dst])                    \n\t" /* odd 2 */
-          "ulw              %[qload2],    17(%[src])                   \n\t"
-          "dpa.w.ph         $ac3,         %[p4],          %[filter12]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter34]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p5],          %[filter56]  \n\t" /* odd 4 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter78]  \n\t" /* odd 4 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 4 */
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 3 */
-
-          /* odd 5. pixel */
-          "mtlo             %[vector_64], $ac2                         \n\t" /* odd 6 */
-          "mthi             $zero,        $ac2                         \n\t"
-          "preceu.ph.qbr    %[p4],        %[qload2]                    \n\t"
-          "sb               %[st2],       5(%[dst])                    \n\t" /* odd 3 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter12]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter34]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p2],          %[filter56]  \n\t" /* odd 5 */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter78]  \n\t" /* odd 5 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 5 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 4 */
-
-          /* odd 6. pixel */
-          "mtlo             %[vector_64], $ac3                         \n\t" /* odd 7 */
-          "mthi             $zero,        $ac3                         \n\t"
-          "preceu.ph.qbl    %[p1],        %[qload2]                    \n\t"
-          "sb               %[st3],       7(%[dst])                    \n\t" /* odd 4 */
-          "ulw              %[qload3],    21(%[src])                   \n\t"
-          "dpa.w.ph         $ac2,         %[p5],          %[filter12]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p2],          %[filter34]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p3],          %[filter56]  \n\t" /* odd 6 */
-          "dpa.w.ph         $ac2,         %[p4],          %[filter78]  \n\t" /* odd 6 */
-          "extp             %[Temp2],     $ac2,           31           \n\t" /* odd 6 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 5 */
-
-          /* odd 7. pixel */
-          "mtlo             %[vector_64], $ac1                         \n\t" /* odd 8 */
-          "mthi             $zero,        $ac1                         \n\t"
-          "preceu.ph.qbr    %[p5],        %[qload3]                    \n\t"
-          "sb               %[st1],       9(%[dst])                    \n\t" /* odd 5 */
-          "dpa.w.ph         $ac3,         %[p2],          %[filter12]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p3],          %[filter34]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p4],          %[filter56]  \n\t" /* odd 7 */
-          "dpa.w.ph         $ac3,         %[p1],          %[filter78]  \n\t" /* odd 7 */
-          "extp             %[Temp3],     $ac3,           31           \n\t" /* odd 7 */
-
-          /* odd 8. pixel */
-          "dpa.w.ph         $ac1,         %[p3],          %[filter12]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p4],          %[filter34]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p1],          %[filter56]  \n\t" /* odd 8 */
-          "dpa.w.ph         $ac1,         %[p5],          %[filter78]  \n\t" /* odd 8 */
-          "extp             %[Temp1],     $ac1,           31           \n\t" /* odd 8 */
-
-          "lbux             %[st2],       %[Temp2](%[cm])              \n\t" /* odd 6 */
-          "lbux             %[st3],       %[Temp3](%[cm])              \n\t" /* odd 7 */
-          "lbux             %[st1],       %[Temp1](%[cm])              \n\t" /* odd 8 */
-
-          "sb               %[st2],       11(%[dst])                   \n\t" /* odd 6 */
-          "sb               %[st3],       13(%[dst])                   \n\t" /* odd 7 */
-          "sb               %[st1],       15(%[dst])                   \n\t" /* odd 8 */
-
-          : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3),
-            [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3),
-            [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4),
-            [p5] "=&r" (p5),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3)
-          : [filter12] "r" (filter12), [filter34] "r" (filter34),
-            [filter56] "r" (filter56), [filter78] "r" (filter78),
-            [vector_64] "r" (vector_64),
-            [cm] "r" (cm), [dst] "r" (dst),
-            [src] "r" (src)
-      );
-
-      src += 16;
-      dst += 16;
-    }
-
-    /* Next row... */
-    src_ptr += src_stride;
-    dst_ptr += dst_stride;
-  }
-}
-
-void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                               uint8_t *dst, ptrdiff_t dst_stride,
-                               const int16_t *filter_x, int x_step_q4,
-                               const int16_t *filter_y, int y_step_q4,
-                               int w, int h) {
-  if (((const int32_t *)filter_x)[1] == 0x800000) {
-    vpx_convolve_copy(src, src_stride,
-                      dst, dst_stride,
-                      filter_x, x_step_q4,
-                      filter_y, y_step_q4,
-                      w, h);
-  } else if (((const int32_t *)filter_x)[0] == 0) {
-    vpx_convolve2_horiz_dspr2(src, src_stride,
-                              dst, dst_stride,
-                              filter_x, x_step_q4,
-                              filter_y, y_step_q4,
-                              w, h);
-  } else {
-    if (16 == x_step_q4) {
-      uint32_t pos = 38;
-
-      prefetch_load((const uint8_t *)filter_x);
-      src -= 3;
-
-      /* bit position for extract from acc */
-      __asm__ __volatile__ (
-        "wrdsp      %[pos],     1           \n\t"
-        :
-        : [pos] "r" (pos)
-      );
-
-      /* prefetch data to cache memory */
-      prefetch_load(src);
-      prefetch_load(src + 32);
-      prefetch_store(dst);
-
-      switch (w) {
-        case 4:
-          convolve_horiz_4_dspr2(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filter_x, (int32_t)h);
-          break;
-        case 8:
-          convolve_horiz_8_dspr2(src, (int32_t)src_stride,
-                                 dst, (int32_t)dst_stride,
-                                 filter_x, (int32_t)h);
-          break;
-        case 16:
-          convolve_horiz_16_dspr2(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filter_x, (int32_t)h, 1);
-          break;
-        case 32:
-          convolve_horiz_16_dspr2(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filter_x, (int32_t)h, 2);
-          break;
-        case 64:
-          prefetch_load(src + 64);
-          prefetch_store(dst + 32);
-
-          convolve_horiz_64_dspr2(src, (int32_t)src_stride,
-                                  dst, (int32_t)dst_stride,
-                                  filter_x, (int32_t)h);
-          break;
-        default:
-          vpx_convolve8_horiz_c(src + 3, src_stride,
-                                dst, dst_stride,
-                                filter_x, x_step_q4,
-                                filter_y, y_step_q4,
-                                w, h);
-          break;
-      }
-    } else {
-      vpx_convolve8_horiz_c(src, src_stride,
-                            dst, dst_stride,
-                            filter_x, x_step_q4,
-                            filter_y, y_step_q4,
-                            w, h);
-    }
-  }
-}
-#endif
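
The two word-compares at the top of the dispatcher above are tap tests done 32 bits at a time. On the little-endian layout this code assumes, ((const int32_t *)filter_x)[1] packs taps 2 and 3, so 0x800000 means tap 2 == 0 and tap 3 == 128 (the identity filter, routed to vpx_convolve_copy), while ((const int32_t *)filter_x)[0] == 0 means taps 0 and 1 are both zero, which within the vpx filter banks identifies the bilinear filters routed to the 2-tap vpx_convolve2_horiz_dspr2 path. A tap-by-tap sketch of the same tests (helper names are illustrative):

    #include <stdint.h>

    /* Identity filter is {0, 0, 0, 128, 0, 0, 0, 0}; within the vpx
     * filter banks, taps 2..3 suffice to distinguish it. */
    static int is_copy_filter(const int16_t *filter) {
      return filter[2] == 0 && filter[3] == 128;  /* word == 0x00800000 */
    }

    /* Bilinear filters have only taps 3 and 4 non-zero, so a zero first
     * word (taps 0 and 1) selects the cheaper 2-tap path. */
    static int is_two_tap_filter(const int16_t *filter) {
      return filter[0] == 0 && filter[1] == 0;    /* word == 0 */
    }
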
--- a/vpx_dsp/mips/vpx_convolve8_vert_dspr2.c
+++ /dev/null
@@ -1,394 +1,0 @@
-/*
- *  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/mips/vpx_common_dspr2.h"
-#include "vpx_dsp/vpx_dsp_common.h"
-#include "vpx_dsp/vpx_filter.h"
-#include "vpx_ports/mem.h"
-
-#if HAVE_DSPR2
-static void convolve_vert_4_dspr2(const uint8_t *src,
-                                  int32_t src_stride,
-                                  uint8_t *dst,
-                                  int32_t dst_stride,
-                                  const int16_t *filter_y,
-                                  int32_t w,
-                                  int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-
-    for (x = 0; x < w; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-static void convolve_vert_64_dspr2(const uint8_t *src,
-                                   int32_t src_stride,
-                                   uint8_t *dst,
-                                   int32_t dst_stride,
-                                   const int16_t *filter_y,
-                                   int32_t h) {
-  int32_t x, y;
-  const uint8_t *src_ptr;
-  uint8_t *dst_ptr;
-  uint8_t *cm = vpx_ff_cropTbl;
-  uint32_t vector4a = 64;
-  uint32_t load1, load2, load3, load4;
-  uint32_t p1, p2;
-  uint32_t n1, n2;
-  uint32_t scratch1, scratch2;
-  uint32_t store1, store2;
-  int32_t vector1b, vector2b, vector3b, vector4b;
-  int32_t Temp1, Temp2;
-
-  vector1b = ((const int32_t *)filter_y)[0];
-  vector2b = ((const int32_t *)filter_y)[1];
-  vector3b = ((const int32_t *)filter_y)[2];
-  vector4b = ((const int32_t *)filter_y)[3];
-
-  src -= 3 * src_stride;
-
-  for (y = h; y--;) {
-    /* prefetch data to cache memory */
-    prefetch_store(dst + dst_stride);
-    prefetch_store(dst + dst_stride + 32);
-
-    for (x = 0; x < 64; x += 4) {
-      src_ptr = src + x;
-      dst_ptr = dst + x;
-
-      __asm__ __volatile__ (
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "mtlo             %[vector4a],  $ac0                            \n\t"
-          "mtlo             %[vector4a],  $ac1                            \n\t"
-          "mtlo             %[vector4a],  $ac2                            \n\t"
-          "mtlo             %[vector4a],  $ac3                            \n\t"
-          "mthi             $zero,        $ac0                            \n\t"
-          "mthi             $zero,        $ac1                            \n\t"
-          "mthi             $zero,        $ac2                            \n\t"
-          "mthi             $zero,        $ac3                            \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector2b]     \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac2,         %[p1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector2b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector1b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector2b]     \n\t"
-
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load1],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load2],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load3],     0(%[src_ptr])                   \n\t"
-          "add              %[src_ptr],   %[src_ptr],     %[src_stride]   \n\t"
-          "ulw              %[load4],     0(%[src_ptr])                   \n\t"
-
-          "preceu.ph.qbr    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbr    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbr    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbr    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "dpa.w.ph         $ac0,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac0,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac0,           31              \n\t"
-          "dpa.w.ph         $ac1,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac1,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac1,           31              \n\t"
-
-          "preceu.ph.qbl    %[scratch1],  %[load1]                        \n\t"
-          "preceu.ph.qbl    %[p1],        %[load2]                        \n\t"
-          "precrq.ph.w      %[n1],        %[p1],          %[scratch1]     \n\t" /* pixel 2 */
-          "append           %[p1],        %[scratch1],    16              \n\t" /* pixel 1 */
-          "preceu.ph.qbl    %[scratch2],  %[load3]                        \n\t"
-          "preceu.ph.qbl    %[p2],        %[load4]                        \n\t"
-          "precrq.ph.w      %[n2],        %[p2],          %[scratch2]     \n\t" /* pixel 2 */
-          "append           %[p2],        %[scratch2],    16              \n\t" /* pixel 1 */
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "dpa.w.ph         $ac2,         %[p1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac2,         %[p2],          %[vector4b]     \n\t"
-          "extp             %[Temp1],     $ac2,           31              \n\t"
-
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-          "dpa.w.ph         $ac3,         %[n1],          %[vector3b]     \n\t"
-          "dpa.w.ph         $ac3,         %[n2],          %[vector4b]     \n\t"
-          "extp             %[Temp2],     $ac3,           31              \n\t"
-
-          "sb               %[store1],    0(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    1(%[dst_ptr])                   \n\t"
-
-          "lbux             %[store1],    %[Temp1](%[cm])                 \n\t"
-          "lbux             %[store2],    %[Temp2](%[cm])                 \n\t"
-
-          "sb               %[store1],    2(%[dst_ptr])                   \n\t"
-          "sb               %[store2],    3(%[dst_ptr])                   \n\t"
-
-          : [load1] "=&r" (load1), [load2] "=&r" (load2),
-            [load3] "=&r" (load3), [load4] "=&r" (load4),
-            [p1] "=&r" (p1), [p2] "=&r" (p2),
-            [n1] "=&r" (n1), [n2] "=&r" (n2),
-            [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2),
-            [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2),
-            [store1] "=&r" (store1), [store2] "=&r" (store2),
-            [src_ptr] "+r" (src_ptr)
-          : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b),
-            [vector3b] "r" (vector3b), [vector4b] "r" (vector4b),
-            [vector4a] "r" (vector4a), [src_stride] "r" (src_stride),
-            [cm] "r" (cm), [dst_ptr] "r" (dst_ptr)
-      );
-    }
-
-    /* Next row... */
-    src += src_stride;
-    dst += dst_stride;
-  }
-}
-
-void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride,
-                              uint8_t *dst, ptrdiff_t dst_stride,
-                              const int16_t *filter_x, int x_step_q4,
-                              const int16_t *filter_y, int y_step_q4,
-                              int w, int h) {
-  if (((const int32_t *)filter_y)[1] == 0x800000) {
-    vpx_convolve_copy(src, src_stride,
-                      dst, dst_stride,
-                      filter_x, x_step_q4,
-                      filter_y, y_step_q4,
-                      w, h);
-  } else if (((const int32_t *)filter_y)[0] == 0) {
-    vpx_convolve2_vert_dspr2(src, src_stride,
-                             dst, dst_stride,
-                             filter_x, x_step_q4,
-                             filter_y, y_step_q4,
-                             w, h);
-  } else {
-    if (16 == y_step_q4) {
-      uint32_t pos = 38;
-
-      /* bit position for extract from acc */
-      __asm__ __volatile__ (
-        "wrdsp      %[pos],     1           \n\t"
-        :
-        : [pos] "r" (pos)
-      );
-
-      prefetch_store(dst);
-
-      switch (w) {
-        case 4:
-        case 8:
-        case 16:
-        case 32:
-          convolve_vert_4_dspr2(src, src_stride,
-                                dst, dst_stride,
-                                filter_y, w, h);
-          break;
-        case 64:
-          prefetch_store(dst + 32);
-          convolve_vert_64_dspr2(src, src_stride,
-                                 dst, dst_stride,
-                                 filter_y, h);
-          break;
-        default:
-          vpx_convolve8_vert_c(src, src_stride,
-                               dst, dst_stride,
-                               filter_x, x_step_q4,
-                               filter_y, y_step_q4,
-                               w, h);
-          break;
-      }
-    } else {
-      vpx_convolve8_vert_c(src, src_stride,
-                           dst, dst_stride,
-                           filter_x, x_step_q4,
-                           filter_y, y_step_q4,
-                           w, h);
-    }
-  }
-}
-
-#endif
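
Both vertical kernels above compute the same arithmetic: each accumulator is preloaded with 64 (the rounding term), dpa.w.ph sums tap-by-pixel products, extp with the DSPControl pos field set to 38 extracts the accumulator shifted right by FILTER_BITS (7), and the vpx_ff_cropTbl lookup clamps the result to 8 bits. A hedged scalar sketch of that computation (clip_pixel stands in for the table lookup; not the library code):

#include <stdint.h>

static uint8_t clip_pixel(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

static void convolve_vert_ref(const uint8_t *src, int src_stride,
                              uint8_t *dst, int dst_stride,
                              const int16_t *filter_y, int w, int h) {
  int x, y, k;

  src -= 3 * src_stride;  /* same back-up as the dspr2 kernels */

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      int sum = 64;  /* rounding term, preloaded via mtlo above */
      for (k = 0; k < 8; ++k)
        sum += src[x + k * src_stride] * filter_y[k];
      dst[x] = clip_pixel(sum >> 7);  /* extp $ac, 31 with pos = 38 */
    }
    src += src_stride;
    dst += dst_stride;
  }
}
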
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -103,17 +103,17 @@
 DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_msa.h
 
 # common (dspr2)
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_common_dspr2.h
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve2_avg_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve2_avg_horiz_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve2_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve2_horiz_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve2_vert_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve8_avg_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve8_avg_horiz_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve8_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve8_horiz_dspr2.c
-DSP_SRCS-$(HAVE_DSPR2)  += mips/vpx_convolve8_vert_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve_common_dspr2.h
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve2_avg_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve2_avg_horiz_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve2_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve2_horiz_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve2_vert_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve8_avg_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve8_avg_horiz_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve8_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve8_horiz_dspr2.c
+DSP_SRCS-$(HAVE_DSPR2)  += mips/convolve8_vert_dspr2.c
 
 # loop filters
 DSP_SRCS-yes += loopfilter.c