shithub: libvpx

ref: 54a6f739585fc9879df0087304c6979a69c92710
parent: ebf7466cd8b884fd29be42ebe670317f5a7ca04d
author: Parag Salasakar <[email protected]>
date: Tue Jun 2 08:16:28 EDT 2015

mips msa vp9 idct4x4 and iwht4x4 optimization

average improvement ~3x-4x
moved assert.h include to the respective files

Change-Id: I6c915059d456a00bdd76fab0dd2eede8b6c6ea58
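
For reference, the simplest of the new routines, vp9_idct4x4_1_add_msa, handles the
DC-only case: the single coefficient is scaled twice by cospi_16_64 (once per 1-D
transform), rounded, and added to all 16 destination pixels. Below is a minimal scalar
sketch of that arithmetic, assuming the usual vp9 fixed-point constants cospi_16_64 =
11585 and DCT_CONST_BITS = 14; clip_u8 and idct4x4_1_add_scalar are illustrative names,
not library functions.

#include <stdint.h>

#define DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(v, n) (((v) + (1 << ((n) - 1))) >> (n))

static const int cospi_16_64 = 11585;  /* round(2^14 / sqrt(2)) */

static uint8_t clip_u8(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* scalar model of the DC-only 4x4 inverse DCT followed by add-to-block */
static void idct4x4_1_add_scalar(const int16_t *input, uint8_t *dst, int stride) {
  int r, c;
  int out = ROUND_POWER_OF_TWO(input[0] * cospi_16_64, DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out * cospi_16_64, DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out, 4);  /* final (x + 8) >> 4 rounding */
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c)
      dst[r * stride + c] = clip_u8(dst[r * stride + c] + out);
}

The MSA version in the patch computes the same value once, splats it across a vector
with __msa_fill_h, and then adds, clips and stores the whole 4x4 block with
ADDBLK_ST4x4_UB in a handful of vector instructions.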

--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -536,4 +536,18 @@
         make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 2, VPX_BITS_8),
         make_tuple(&vp9_fht4x4_sse2, &vp9_iht4x4_16_add_c, 3, VPX_BITS_8)));
 #endif  // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+    MSA, Trans4x4DCT,
+    ::testing::Values(
+        make_tuple(&vp9_fdct4x4_c, &vp9_idct4x4_16_add_msa, 1, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+    MSA, Trans4x4HT,
+    ::testing::Values(
+        make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_msa, 0, VPX_BITS_8),
+        make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_msa, 1, VPX_BITS_8),
+        make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_msa, 2, VPX_BITS_8),
+        make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -332,7 +332,11 @@
         make_tuple(&vp9_fdct8x8_c,
                    &vp9_idct8x8_64_add_c,
                    &vp9_idct8x8_1_add_msa,
-                   TX_8X8, 1)));
+                   TX_8X8, 1),
+        make_tuple(&vp9_fdct4x4_c,
+                   &vp9_idct4x4_16_add_c,
+                   &vp9_idct4x4_1_add_msa,
+                   TX_4X4, 1)));
 #endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 }  // namespace
--- a/vp9/common/mips/msa/vp9_idct16x16_msa.c
+++ b/vp9/common/mips/msa/vp9_idct16x16_msa.c
@@ -8,6 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <assert.h>
 #include "vp9/common/mips/msa/vp9_idct_msa.h"
 
 void vp9_idct16_1d_rows_msa(const int16_t *input, int16_t *output) {
--- /dev/null
+++ b/vp9/common/mips/msa/vp9_idct4x4_msa.c
@@ -1,0 +1,147 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include "vp9/common/mips/msa/vp9_idct_msa.h"
+
+void vp9_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+                            int32_t dst_stride) {
+  v8i16 in0, in1, in2, in3;
+  v4i32 in0_r, in1_r, in2_r, in3_r, in4_r;
+
+  /* load vector elements of 4x4 block */
+  LD4x4_SH(input, in0, in2, in3, in1);
+  TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1);
+  UNPCK_R_SH_SW(in0, in0_r);
+  UNPCK_R_SH_SW(in2, in2_r);
+  UNPCK_R_SH_SW(in3, in3_r);
+  UNPCK_R_SH_SW(in1, in1_r);
+  SRA_4V(in0_r, in1_r, in2_r, in3_r, UNIT_QUANT_SHIFT);
+
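+  /* first inverse Walsh-Hadamard butterfly pass: only additions,
+     subtractions and a single halving shift, no multiplies */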
+  in0_r += in2_r;
+  in3_r -= in1_r;
+  in4_r = (in0_r - in3_r) >> 1;
+  in1_r = in4_r - in1_r;
+  in2_r = in4_r - in2_r;
+  in0_r -= in1_r;
+  in3_r += in2_r;
+
+  TRANSPOSE4x4_SW_SW(in0_r, in1_r, in2_r, in3_r, in0_r, in1_r, in2_r, in3_r);
+
+  in0_r += in1_r;
+  in2_r -= in3_r;
+  in4_r = (in0_r - in2_r) >> 1;
+  in3_r = in4_r - in3_r;
+  in1_r = in4_r - in1_r;
+  in0_r -= in3_r;
+  in2_r += in1_r;
+
+  PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r,
+              in0, in1, in2, in3);
+  ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride);
+}
+
+void vp9_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride) {
+  int16_t a1, e1;
+  v8i16 in1, in0 = { 0 };
+
+  a1 = input[0] >> UNIT_QUANT_SHIFT;
+  e1 = a1 >> 1;
+  a1 -= e1;
+
+  in0 = __msa_insert_h(in0, 0, a1);
+  in0 = __msa_insert_h(in0, 1, e1);
+  in0 = __msa_insert_h(in0, 2, e1);
+  in0 = __msa_insert_h(in0, 3, e1);
+
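+  /* column pass of the DC-only inverse WHT: row 0 keeps x - (x >> 1),
+     rows 1-3 take x >> 1 */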
+  in1 = in0 >> 1;
+  in0 -= in1;
+
+  ADDBLK_ST4x4_UB(in0, in1, in1, in1, dst, dst_stride);
+}
+
+void vp9_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+                            int32_t dst_stride) {
+  v8i16 in0, in1, in2, in3;
+
+  /* load vector elements of 4x4 block */
+  LD4x4_SH(input, in0, in1, in2, in3);
+  /* rows */
+  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  /* columns */
+  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+  VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+  /* rounding (add 2^3, divide by 2^4) */
+  SRARI_H4_SH(in0, in1, in2, in3, 4);
+  ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
+}
+
+void vp9_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride) {
+  int16_t out;
+  v8i16 vec;
+
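+  /* DC-only path: scale the DC coefficient by cospi_16_64 for the row and
+     column 1-D transforms, then apply the final (x + 8) >> 4 rounding */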
+  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
+  out = ROUND_POWER_OF_TWO(out, 4);
+  vec = __msa_fill_h(out);
+
+  ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride);
+}
+
+void vp9_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+                           int32_t dst_stride, int32_t tx_type) {
+  v8i16 in0, in1, in2, in3;
+
+  /* load vector elements of 4x4 block */
+  LD4x4_SH(input, in0, in1, in2, in3);
+  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+
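+  /* tx_type selects DCT or ADST per direction; the block is transposed
+     before each 1-D pass so the same transform macros handle both passes */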
+  switch (tx_type) {
+    case DCT_DCT:
+      /* DCT in horizontal */
+      VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      /* DCT in vertical */
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case ADST_DCT:
+      /* DCT in horizontal */
+      VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      /* ADST in vertical */
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case DCT_ADST:
+      /* ADST in horizontal */
+      VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      /* DCT in vertical */
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    case ADST_ADST:
+      /* ADST in horizontal */
+      VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      /* ADST in vertical */
+      TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
+      VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
+      break;
+    default:
+      assert(0);
+      break;
+  }
+
+  /* final rounding (add 2^3, divide by 2^4) and shift */
+  SRARI_H4_SH(in0, in1, in2, in3, 4);
+  /* add block and store 4x4 */
+  ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
+}
--- a/vp9/common/mips/msa/vp9_idct8x8_msa.c
+++ b/vp9/common/mips/msa/vp9_idct8x8_msa.c
@@ -8,6 +8,7 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <assert.h>
 #include "vp9/common/mips/msa/vp9_idct_msa.h"
 
 void vp9_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
--- a/vp9/common/mips/msa/vp9_idct_msa.h
+++ b/vp9/common/mips/msa/vp9_idct_msa.h
@@ -11,8 +11,6 @@
 #ifndef VP9_COMMON_MIPS_MSA_VP9_IDCT_MSA_H_
 #define VP9_COMMON_MIPS_MSA_VP9_IDCT_MSA_H_
 
-#include <assert.h>
-
 #include "vpx_ports/mem.h"
 #include "vp9/common/vp9_idct.h"
 #include "vp9/common/mips/msa/vp9_macros_msa.h"
--- a/vp9/common/mips/msa/vp9_macros_msa.h
+++ b/vp9/common/mips/msa/vp9_macros_msa.h
@@ -228,6 +228,22 @@
 }
 #endif  // (__mips_isa_rev >= 6)
 
+/* Description : Load 4 words with stride
+   Arguments   : Inputs  - psrc    (source pointer to load from)
+                         - stride
+                 Outputs - out0, out1, out2, out3
+   Details     : Loads word into 'out0' from (psrc)
+                 Loads word into 'out1' from (psrc + stride)
+                 Loads word into 'out2' from (psrc + 2 * stride)
+                 Loads word into 'out3' from (psrc + 3 * stride)
+*/
+#define LW4(psrc, stride, out0, out1, out2, out3) {  \
+  out0 = LW((psrc));                                 \
+  out1 = LW((psrc) + stride);                        \
+  out2 = LW((psrc) + 2 * stride);                    \
+  out3 = LW((psrc) + 3 * stride);                    \
+}
+
 /* Description : Store 4 words with stride
    Arguments   : Inputs  - in0, in1, in2, in3, pdst, stride
    Details     : Stores word from 'in0' to (pdst)
@@ -336,6 +352,18 @@
 }
 #define LD_SH16(...) LD_H16(v8i16, __VA_ARGS__)
 
+/* Description : Load a 4x4 block of signed halfword elements from 1D source
+                 data into 4 vectors (each vector with 4 signed halfwords)
+   Arguments   : Inputs  - psrc
+                 Outputs - out0, out1, out2, out3
+*/
+#define LD4x4_SH(psrc, out0, out1, out2, out3) {         \
+  out0 = LD_SH(psrc);                                    \
+  out2 = LD_SH(psrc + 8);                                \
+  out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0);  \
+  out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2);  \
+}
+
 /* Description : Store vectors of 16 byte elements with stride
    Arguments   : Inputs  - in0, in1, stride
                  Outputs - pdst    (destination pointer to store to)
@@ -681,6 +709,18 @@
   CLIP_SH2_0_255(in2, in3);                   \
 }
 
+/* Description : Insert specified word elements into one destination vector
+   Arguments   : Inputs  - in0, in1 (2 input word elements)
+                 Outputs - out                (output vector)
+                 Return Type - as per RTYPE
+*/
+#define INSERT_W2(RTYPE, in0, in1, out) {           \
+  out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0);  \
+  out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1);  \
+}
+#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
+
 /* Description : Interleave even byte elements from vectors
    Arguments   : Inputs  - in0, in1, in2, in3
                  Outputs - out0, out1
@@ -1098,6 +1138,23 @@
 }
 #define ADDS_SH4_SH(...) ADDS_SH4(v8i16, __VA_ARGS__)
 
+/* Description : Arithmetic shift right all elements of vector
+                 (generic for all data types)
+   Arguments   : Inputs  - in0, in1, in2, in3, shift
+                 Outputs - in0, in1, in2, in3 (in place)
+                 Return Type - as per input vector RTYPE
+   Details     : Each element of vector 'in0' is arithmetically right shifted
+                 by 'shift' and the result is written back to 'in0' in place.
+                 Here, 'shift' is a GP variable passed in.
+                 The same is done for 'in1', 'in2' and 'in3'
+*/
+#define SRA_4V(in0, in1, in2, in3, shift) {  \
+  in0 = in0 >> shift;                        \
+  in1 = in1 >> shift;                        \
+  in2 = in2 >> shift;                        \
+  in3 = in3 >> shift;                        \
+}
+
 /* Description : Shift right arithmetic rounded (immediate)
    Arguments   : Inputs  - in0, in1, in2, in3, shift
                  Outputs - in0, in1, in2, in3 (in place)
@@ -1178,6 +1235,21 @@
   out3 = in6 - in7;                                   \
 }
 
+/* Description : Sign extend halfword elements from right half of the vector
+   Arguments   : Inputs  - in    (input halfword vector)
+                 Outputs - out   (sign extended word vector)
+                 Return Type - signed word
+   Details     : Sign bits of the halfword elements in input vector 'in' are
+                 extracted and interleaved with the right half of 'in' itself
+                 to generate 4 word elements with the sign kept intact
+*/
+#define UNPCK_R_SH_SW(in, out) {                 \
+  v8i16 sign_m;                                  \
+                                                 \
+  sign_m = __msa_clti_s_h((v8i16)in, 0);         \
+  out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in);  \
+}
+
 /* Description : Zero extend unsigned byte elements to halfword elements
    Arguments   : Inputs  - in           (1 input unsigned byte vector)
                  Outputs - out0, out1   (unsigned 2 halfword vectors)
@@ -1222,6 +1294,21 @@
   out7 = in0 - in7;                                                    \
 }
 
+/* Description : Transposes 4x4 block with half word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - signed halfword
+   Details     :
+*/
+#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) {  \
+  v8i16 s0_m, s1_m;                                                       \
+                                                                          \
+  ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m);                             \
+  ILVRL_W2_SH(s1_m, s0_m, out0, out2);                                    \
+  out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0);                   \
+  out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2);                   \
+}
+
 /* Description : Transposes 4x8 block with half word elements in vectors
    Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7
                  Outputs - out0, out1, out2, out3, out4, out5, out6, out7
@@ -1293,6 +1380,55 @@
   out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m);              \
 }
 #define TRANSPOSE8x8_SH_SH(...) TRANSPOSE8x8_H(v8i16, __VA_ARGS__)
+
+/* Description : Transposes 4x4 block with word elements in vectors
+   Arguments   : Inputs  - in0, in1, in2, in3
+                 Outputs - out0, out1, out2, out3
+                 Return Type - signed word
+   Details     :
+*/
+#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) {  \
+  v4i32 s0_m, s1_m, s2_m, s3_m;                                           \
+                                                                          \
+  ILVRL_W2_SW(in1, in0, s0_m, s1_m);                                      \
+  ILVRL_W2_SW(in3, in2, s2_m, s3_m);                                      \
+                                                                          \
+  out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m);                   \
+  out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m);                   \
+  out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m);                   \
+  out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m);                   \
+}
+
+/* Description : Add block 4x4
+   Arguments   : Inputs  - in0, in1, in2, in3, pdst, stride
+                 Outputs -
+                 Return Type - unsigned bytes
+   Details     : Least significant 4 bytes from each input vector are added to
+                 the destination bytes, clipped between 0-255 and then stored.
+*/
+#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) {     \
+  uint32_t src0_m, src1_m, src2_m, src3_m;                      \
+  uint32_t out0_m, out1_m, out2_m, out3_m;                      \
+  v8i16 inp0_m, inp1_m, res0_m, res1_m;                         \
+  v16i8 dst0_m = { 0 };                                         \
+  v16i8 dst1_m = { 0 };                                         \
+  v16i8 zero_m = { 0 };                                         \
+                                                                \
+  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m);               \
+  LW4(pdst, stride,  src0_m, src1_m, src2_m, src3_m);           \
+  INSERT_W2_SB(src0_m, src1_m, dst0_m);                         \
+  INSERT_W2_SB(src2_m, src3_m, dst1_m);                         \
+  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);   \
+  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);         \
+  CLIP_SH2_0_255(res0_m, res1_m);                               \
+  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);  \
+                                                                \
+  out0_m = __msa_copy_u_w((v4i32)dst0_m, 0);                    \
+  out1_m = __msa_copy_u_w((v4i32)dst0_m, 1);                    \
+  out2_m = __msa_copy_u_w((v4i32)dst1_m, 0);                    \
+  out3_m = __msa_copy_u_w((v4i32)dst1_m, 1);                    \
+  SW4(out0_m, out1_m, out2_m, out3_m, pdst, stride);            \
+}
 
 /* Description : Pack even elements of input vectors & xor with 128
    Arguments   : Inputs  - in0, in1
--- a/vp9/common/vp9_rtcd_defs.pl
+++ b/vp9/common/vp9_rtcd_defs.pl
@@ -419,10 +419,10 @@
     specialize qw/vp9_iwht4x4_16_add/;
   } else {
     add_proto qw/void vp9_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct4x4_1_add sse2 neon dspr2/;
+    specialize qw/vp9_idct4x4_1_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_idct4x4_16_add sse2 neon dspr2/;
+    specialize qw/vp9_idct4x4_16_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
     specialize qw/vp9_idct8x8_1_add sse2 neon dspr2 msa/;
@@ -454,7 +454,7 @@
     specialize qw/vp9_idct32x32_1_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp9_iht4x4_16_add sse2 neon dspr2/;
+    specialize qw/vp9_iht4x4_16_add sse2 neon dspr2 msa/;
 
     add_proto qw/void vp9_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
     specialize qw/vp9_iht8x8_64_add sse2 neon dspr2 msa/;
@@ -465,10 +465,10 @@
     # dct and add
 
     add_proto qw/void vp9_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_iwht4x4_1_add/;
+    specialize qw/vp9_iwht4x4_1_add msa/;
 
     add_proto qw/void vp9_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp9_iwht4x4_16_add/;
+    specialize qw/vp9_iwht4x4_16_add msa/;
   }
 }
 
--- a/vp9/vp9_common.mk
+++ b/vp9/vp9_common.mk
@@ -138,6 +138,7 @@
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_avg_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_copy_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_convolve_msa.h
+VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct4x4_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct8x8_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct16x16_msa.c
 VP9_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp9_idct32x32_msa.c