ref: c1a90dc160d1c14d973526528f92494c76982a28
parent: 25301a84a8fe65f57cfe7d67001912b44f5085c8
parent: bd86de1ac8f1d3f5fa894e2605821914c2d9ff22
author: Yi Luo <[email protected]>
date: Tue Feb 14 15:13:26 EST 2017
Merge "Replace idct32x32_34_add_ssse3 assembly with intrinsics"
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -806,7 +806,7 @@
$vpx_idct32x32_135_add_msa=vpx_idct32x32_1024_add_msa;
add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int stride";
- specialize qw/vpx_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64";
+ specialize qw/vpx_idct32x32_34_add sse2 ssse3 neon dspr2 msa/;
add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int stride";
specialize qw/vpx_idct32x32_1_add sse2 neon dspr2 msa/;
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -263,43 +263,6 @@
in[1] = _mm_packs_epi32(u[2], u[3]);
}
-// Define Macro for multiplying elements by constants and adding them together.
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
- res0, res1, res2, res3) \
- { \
- tmp0 = _mm_madd_epi16(lo_0, cst0); \
- tmp1 = _mm_madd_epi16(hi_0, cst0); \
- tmp2 = _mm_madd_epi16(lo_0, cst1); \
- tmp3 = _mm_madd_epi16(hi_0, cst1); \
- tmp4 = _mm_madd_epi16(lo_1, cst2); \
- tmp5 = _mm_madd_epi16(hi_1, cst2); \
- tmp6 = _mm_madd_epi16(lo_1, cst3); \
- tmp7 = _mm_madd_epi16(hi_1, cst3); \
- \
- tmp0 = _mm_add_epi32(tmp0, rounding); \
- tmp1 = _mm_add_epi32(tmp1, rounding); \
- tmp2 = _mm_add_epi32(tmp2, rounding); \
- tmp3 = _mm_add_epi32(tmp3, rounding); \
- tmp4 = _mm_add_epi32(tmp4, rounding); \
- tmp5 = _mm_add_epi32(tmp5, rounding); \
- tmp6 = _mm_add_epi32(tmp6, rounding); \
- tmp7 = _mm_add_epi32(tmp7, rounding); \
- \
- tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
- tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
- tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
- tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
- tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
- tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
- tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
- tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
- \
- res0 = _mm_packs_epi32(tmp0, tmp1); \
- res1 = _mm_packs_epi32(tmp2, tmp3); \
- res2 = _mm_packs_epi32(tmp4, tmp5); \
- res3 = _mm_packs_epi32(tmp6, tmp7); \
- }
-
#define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \
{ \
tmp0 = _mm_madd_epi16(lo_0, cst0); \
--- a/vpx_dsp/x86/inv_txfm_sse2.h
+++ b/vpx_dsp/x86/inv_txfm_sse2.h
@@ -242,6 +242,43 @@
out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \
}
+// Macro for multiplying elements by constants and adding the products.
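+// Expects tmp0..tmp7 and rounding to be declared in the enclosing scope;
+// each resN is the rounded (a * cst) >> DCT_CONST_BITS product packed back
+// to 16 bits.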
+#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
+ res0, res1, res2, res3) \
+ { \
+ tmp0 = _mm_madd_epi16(lo_0, cst0); \
+ tmp1 = _mm_madd_epi16(hi_0, cst0); \
+ tmp2 = _mm_madd_epi16(lo_0, cst1); \
+ tmp3 = _mm_madd_epi16(hi_0, cst1); \
+ tmp4 = _mm_madd_epi16(lo_1, cst2); \
+ tmp5 = _mm_madd_epi16(hi_1, cst2); \
+ tmp6 = _mm_madd_epi16(lo_1, cst3); \
+ tmp7 = _mm_madd_epi16(hi_1, cst3); \
+ \
+ tmp0 = _mm_add_epi32(tmp0, rounding); \
+ tmp1 = _mm_add_epi32(tmp1, rounding); \
+ tmp2 = _mm_add_epi32(tmp2, rounding); \
+ tmp3 = _mm_add_epi32(tmp3, rounding); \
+ tmp4 = _mm_add_epi32(tmp4, rounding); \
+ tmp5 = _mm_add_epi32(tmp5, rounding); \
+ tmp6 = _mm_add_epi32(tmp6, rounding); \
+ tmp7 = _mm_add_epi32(tmp7, rounding); \
+ \
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \
+ tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \
+ tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \
+ tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \
+ tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \
+ \
+ res0 = _mm_packs_epi32(tmp0, tmp1); \
+ res1 = _mm_packs_epi32(tmp2, tmp3); \
+ res2 = _mm_packs_epi32(tmp4, tmp5); \
+ res3 = _mm_packs_epi32(tmp6, tmp7); \
+ }
+
void idct4_sse2(__m128i *in);
void idct8_sse2(__m128i *in);
void idct16_sse2(__m128i *in0, __m128i *in1);
--- a/vpx_dsp/x86/inv_txfm_ssse3.c
+++ b/vpx_dsp/x86/inv_txfm_ssse3.c
@@ -322,3 +322,433 @@
RECON_AND_STORE(dest + 6 * stride, in6);
RECON_AND_STORE(dest + 7 * stride, in7);
}
+
+static INLINE void idct32_34(const __m128i *in, __m128i *stp1) {
+ const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
+ // idct constants for each stage
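+  // The stk*_* constants are pre-scaled by 2 so that _mm_mulhrs_epi16,
+  // which computes (a * b + (1 << 14)) >> 15, gives the same result as
+  // (a * cospi + DCT_CONST_ROUNDING) >> DCT_CONST_BITS without widening
+  // to 32 bits. The stg*_* pairs feed the 32-bit _mm_madd_epi16 path.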
+ const __m128i stk1_0 = pair_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
+ const __m128i stk1_1 = pair_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
+ const __m128i stk1_6 = pair_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
+ const __m128i stk1_7 = pair_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
+ const __m128i stk1_8 = pair_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
+ const __m128i stk1_9 = pair_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
+ const __m128i stk1_14 = pair_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
+ const __m128i stk1_15 = pair_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
+
+ const __m128i stk2_0 = pair_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
+ const __m128i stk2_1 = pair_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
+ const __m128i stk2_6 = pair_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
+ const __m128i stk2_7 = pair_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
+
+ const __m128i stk3_0 = pair_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
+ const __m128i stk3_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
+ const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
+ const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
+ const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
+ const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64);
+ const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64);
+ const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64);
+
+ const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
+ const __m128i stk4_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
+ const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
+ const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64);
+ const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64);
+ const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
+
+ const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
+ __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7,
+ stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15,
+ stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23,
+ stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ /* Stage1 */
+
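+  // Only in[1], in[3], in[5] and in[7] are nonzero in the odd half, so each
+  // stage-1 butterfly reduces to two _mm_mulhrs_epi16 multiplies.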
+ stp1[16] = _mm_mulhrs_epi16(in[1], stk1_0);
+ stp1[31] = _mm_mulhrs_epi16(in[1], stk1_1);
+
+ stp1[19] = _mm_mulhrs_epi16(in[7], stk1_6);
+ stp1[28] = _mm_mulhrs_epi16(in[7], stk1_7);
+
+ stp1[20] = _mm_mulhrs_epi16(in[5], stk1_8);
+ stp1[27] = _mm_mulhrs_epi16(in[5], stk1_9);
+
+ stp1[23] = _mm_mulhrs_epi16(in[3], stk1_14);
+ stp1[24] = _mm_mulhrs_epi16(in[3], stk1_15);
+
+ /* Stage2 */
+
+ stp2_8 = _mm_mulhrs_epi16(in[2], stk2_0);
+ stp2_15 = _mm_mulhrs_epi16(in[2], stk2_1);
+
+ stp2_11 = _mm_mulhrs_epi16(in[6], stk2_6);
+ stp2_12 = _mm_mulhrs_epi16(in[6], stk2_7);
+
+ /* Stage3 */
+ {
+ const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1[16], stp1[31]);
+ const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1[16], stp1[31]);
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1[19], stp1[28]);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1[19], stp1[28]);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1[20], stp1[27]);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1[20], stp1[27]);
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1[23], stp1[24]);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1[23], stp1[24]);
+
+ stp1[4] = _mm_mulhrs_epi16(in[4], stk3_0);
+ stp1[7] = _mm_mulhrs_epi16(in[4], stk3_1);
+
+ MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4,
+ stg3_5, stg3_6, stg3_4, stp1[17], stp1[30], stp1[18],
+ stp1[29])
+ MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8,
+ stg3_9, stg3_10, stg3_8, stp1[21], stp1[26],
+ stp1[22], stp1[25])
+ }
+
+ /* Stage4 */
+ {
+ const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15);
+ const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15);
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12);
+
+ stp1[0] = _mm_mulhrs_epi16(in[0], stk4_0);
+ stp1[1] = _mm_mulhrs_epi16(in[0], stk4_0); // stk4_1 = stk4_0
+ stp1[2] = stp1[0];
+ stp1[3] = stp1[1];
+
+ MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, stg4_5,
+ stg4_6, stg4_4, stp2_9, stp2_14, stp2_10, stp2_13)
+
+ stp2_16 = _mm_add_epi16(stp1[16], stp1[19]);
+ stp2_17 = _mm_add_epi16(stp1[17], stp1[18]);
+ stp2_18 = _mm_sub_epi16(stp1[17], stp1[18]);
+ stp2_19 = _mm_sub_epi16(stp1[16], stp1[19]);
+ stp2_20 = _mm_sub_epi16(stp1[23], stp1[20]);
+ stp2_21 = _mm_sub_epi16(stp1[22], stp1[21]);
+ stp2_22 = _mm_add_epi16(stp1[22], stp1[21]);
+ stp2_23 = _mm_add_epi16(stp1[23], stp1[20]);
+
+ stp2_24 = _mm_add_epi16(stp1[24], stp1[27]);
+ stp2_25 = _mm_add_epi16(stp1[25], stp1[26]);
+ stp2_26 = _mm_sub_epi16(stp1[25], stp1[26]);
+ stp2_27 = _mm_sub_epi16(stp1[24], stp1[27]);
+ stp2_28 = _mm_sub_epi16(stp1[31], stp1[28]);
+ stp2_29 = _mm_sub_epi16(stp1[30], stp1[29]);
+ stp2_30 = _mm_add_epi16(stp1[29], stp1[30]);
+ stp2_31 = _mm_add_epi16(stp1[28], stp1[31]);
+ }
+
+ /* Stage5 */
+ {
+    // Note:
+    // With AVOID_OVERFLOW defined to 0 the code is faster, but the
+    // intermediate 16-bit additions can overflow on extreme inputs, so it
+    // cannot pass the SingleExtreme test; MaxSupportedCoeff and
+    // MinSupportedCoeff would have to drop to 23198 and -23197,
+    // respectively.
+#define AVOID_OVERFLOW (1)
+
+#if AVOID_OVERFLOW
+ const __m128i lo_6_5 = _mm_unpacklo_epi16(stp1[7], stp1[4]);
+ const __m128i hi_6_5 = _mm_unpackhi_epi16(stp1[7], stp1[4]);
+#endif
+ const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29);
+ const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29);
+
+ const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28);
+ const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28);
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+#if AVOID_OVERFLOW
+ tmp0 = _mm_madd_epi16(lo_6_5, stg4_1);
+ tmp1 = _mm_madd_epi16(hi_6_5, stg4_1);
+ tmp2 = _mm_madd_epi16(lo_6_5, stg4_0);
+ tmp3 = _mm_madd_epi16(hi_6_5, stg4_0);
+
+ tmp0 = _mm_add_epi32(tmp0, rounding);
+ tmp1 = _mm_add_epi32(tmp1, rounding);
+ tmp2 = _mm_add_epi32(tmp2, rounding);
+ tmp3 = _mm_add_epi32(tmp3, rounding);
+
+ tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);
+ tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
+ tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+ tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
+
+ stp1[5] = _mm_packs_epi32(tmp0, tmp1);
+ stp1[6] = _mm_packs_epi32(tmp2, tmp3);
+#else
+ tmp0 = _mm_sub_epi16(stp1[7], stp1[4]);
+ tmp1 = _mm_adds_epi16(stp1[7], stp1[4]);
+ stp1[5] = _mm_mulhrs_epi16(tmp0, stk4_0);
+ stp1[6] = _mm_mulhrs_epi16(tmp1, stk4_0);
+#endif
+
+ stp1[8] = _mm_add_epi16(stp2_8, stp2_11);
+ stp1[9] = _mm_add_epi16(stp2_9, stp2_10);
+ stp1[10] = _mm_sub_epi16(stp2_9, stp2_10);
+ stp1[11] = _mm_sub_epi16(stp2_8, stp2_11);
+ stp1[12] = _mm_sub_epi16(stp2_15, stp2_12);
+ stp1[13] = _mm_sub_epi16(stp2_14, stp2_13);
+ stp1[14] = _mm_add_epi16(stp2_14, stp2_13);
+ stp1[15] = _mm_add_epi16(stp2_15, stp2_12);
+
+ MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4,
+ stg4_5, stg4_4, stg4_5, stp1[18], stp1[29], stp1[19],
+ stp1[28])
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6,
+ stg4_4, stg4_6, stg4_4, stp1[20], stp1[27], stp1[21],
+ stp1[26])
+
+ stp1[16] = stp2_16;
+ stp1[17] = stp2_17;
+ stp1[22] = stp2_22;
+ stp1[23] = stp2_23;
+ stp1[24] = stp2_24;
+ stp1[25] = stp2_25;
+ stp1[30] = stp2_30;
+ stp1[31] = stp2_31;
+ }
+
+ /* Stage6 */
+ {
+#if AVOID_OVERFLOW
+ const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1[10], stp1[13]);
+ const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1[10], stp1[13]);
+ const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1[11], stp1[12]);
+ const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1[11], stp1[12]);
+#endif
+
+ stp2_0 = _mm_add_epi16(stp1[0], stp1[7]);
+ stp2_1 = _mm_add_epi16(stp1[1], stp1[6]);
+ stp2_2 = _mm_add_epi16(stp1[2], stp1[5]);
+ stp2_3 = _mm_add_epi16(stp1[3], stp1[4]);
+ stp2_4 = _mm_sub_epi16(stp1[3], stp1[4]);
+ stp2_5 = _mm_sub_epi16(stp1[2], stp1[5]);
+ stp2_6 = _mm_sub_epi16(stp1[1], stp1[6]);
+ stp2_7 = _mm_sub_epi16(stp1[0], stp1[7]);
+
+ stp2_8 = stp1[8];
+ stp2_9 = stp1[9];
+ stp2_14 = stp1[14];
+ stp2_15 = stp1[15];
+
+#if AVOID_OVERFLOW
+ MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11,
+ stp2_12)
+#else
+ tmp0 = _mm_add_epi16(stp1[10], stp1[13]);
+ tmp1 = _mm_sub_epi16(stp1[13], stp1[10]);
+ tmp2 = _mm_add_epi16(stp1[11], stp1[12]);
+ tmp3 = _mm_sub_epi16(stp1[12], stp1[11]);
+
+ stp2_10 = _mm_mulhrs_epi16(tmp1, stk4_0);
+ stp2_13 = _mm_mulhrs_epi16(tmp0, stk4_0);
+ stp2_11 = _mm_mulhrs_epi16(tmp3, stk4_0);
+ stp2_12 = _mm_mulhrs_epi16(tmp2, stk4_0);
+
+#endif
+
+ stp2_16 = _mm_add_epi16(stp1[16], stp1[23]);
+ stp2_17 = _mm_add_epi16(stp1[17], stp1[22]);
+ stp2_18 = _mm_add_epi16(stp1[18], stp1[21]);
+ stp2_19 = _mm_add_epi16(stp1[19], stp1[20]);
+ stp2_20 = _mm_sub_epi16(stp1[19], stp1[20]);
+ stp2_21 = _mm_sub_epi16(stp1[18], stp1[21]);
+ stp2_22 = _mm_sub_epi16(stp1[17], stp1[22]);
+ stp2_23 = _mm_sub_epi16(stp1[16], stp1[23]);
+
+ stp2_24 = _mm_sub_epi16(stp1[31], stp1[24]);
+ stp2_25 = _mm_sub_epi16(stp1[30], stp1[25]);
+ stp2_26 = _mm_sub_epi16(stp1[29], stp1[26]);
+ stp2_27 = _mm_sub_epi16(stp1[28], stp1[27]);
+ stp2_28 = _mm_add_epi16(stp1[27], stp1[28]);
+ stp2_29 = _mm_add_epi16(stp1[26], stp1[29]);
+ stp2_30 = _mm_add_epi16(stp1[25], stp1[30]);
+ stp2_31 = _mm_add_epi16(stp1[24], stp1[31]);
+ }
+
+ /* Stage7 */
+ {
+#if AVOID_OVERFLOW
+ const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27);
+ const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27);
+ const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26);
+ const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26);
+
+ const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25);
+ const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25);
+ const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24);
+ const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24);
+#endif
+ stp1[0] = _mm_add_epi16(stp2_0, stp2_15);
+ stp1[1] = _mm_add_epi16(stp2_1, stp2_14);
+ stp1[2] = _mm_add_epi16(stp2_2, stp2_13);
+ stp1[3] = _mm_add_epi16(stp2_3, stp2_12);
+ stp1[4] = _mm_add_epi16(stp2_4, stp2_11);
+ stp1[5] = _mm_add_epi16(stp2_5, stp2_10);
+ stp1[6] = _mm_add_epi16(stp2_6, stp2_9);
+ stp1[7] = _mm_add_epi16(stp2_7, stp2_8);
+ stp1[8] = _mm_sub_epi16(stp2_7, stp2_8);
+ stp1[9] = _mm_sub_epi16(stp2_6, stp2_9);
+ stp1[10] = _mm_sub_epi16(stp2_5, stp2_10);
+ stp1[11] = _mm_sub_epi16(stp2_4, stp2_11);
+ stp1[12] = _mm_sub_epi16(stp2_3, stp2_12);
+ stp1[13] = _mm_sub_epi16(stp2_2, stp2_13);
+ stp1[14] = _mm_sub_epi16(stp2_1, stp2_14);
+ stp1[15] = _mm_sub_epi16(stp2_0, stp2_15);
+
+ stp1[16] = stp2_16;
+ stp1[17] = stp2_17;
+ stp1[18] = stp2_18;
+ stp1[19] = stp2_19;
+
+#if AVOID_OVERFLOW
+ MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1[20], stp1[27], stp1[21],
+ stp1[26])
+ MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0,
+ stg4_0, stg6_0, stg4_0, stp1[22], stp1[25], stp1[23],
+ stp1[24])
+#else
+ tmp0 = _mm_add_epi16(stp2_20, stp2_27);
+ tmp1 = _mm_sub_epi16(stp2_27, stp2_20);
+ tmp2 = _mm_add_epi16(stp2_21, stp2_26);
+ tmp3 = _mm_sub_epi16(stp2_26, stp2_21);
+
+ stp1[20] = _mm_mulhrs_epi16(tmp1, stk4_0);
+ stp1[27] = _mm_mulhrs_epi16(tmp0, stk4_0);
+ stp1[21] = _mm_mulhrs_epi16(tmp3, stk4_0);
+ stp1[26] = _mm_mulhrs_epi16(tmp2, stk4_0);
+
+ tmp0 = _mm_add_epi16(stp2_22, stp2_25);
+ tmp1 = _mm_sub_epi16(stp2_25, stp2_22);
+ tmp2 = _mm_add_epi16(stp2_23, stp2_24);
+ tmp3 = _mm_sub_epi16(stp2_24, stp2_23);
+
+ stp1[22] = _mm_mulhrs_epi16(tmp1, stk4_0);
+ stp1[25] = _mm_mulhrs_epi16(tmp0, stk4_0);
+ stp1[23] = _mm_mulhrs_epi16(tmp3, stk4_0);
+ stp1[24] = _mm_mulhrs_epi16(tmp2, stk4_0);
+#endif
+
+ stp1[28] = stp2_28;
+ stp1[29] = stp2_29;
+ stp1[30] = stp2_30;
+ stp1[31] = stp2_31;
+ }
+#undef AVOID_OVERFLOW
+}
+
+// Only the upper-left 8x8 block has non-zero coefficients.
+void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
+ int stride) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i final_rounding = _mm_set1_epi16(1 << 5);
+ __m128i in[32], col[32];
+ __m128i stp1[32];
+ int i;
+
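+  // Pass 1 transforms the transposed top-left 8x8 block; pass 2 runs
+  // idct32_34 on four 8-column groups and writes the result to dest.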
+  // Load input data. Only the top-left 8x8 block needs to be loaded.
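+  // Each load_input_data() reads eight coefficients; rows of the 32x32
+  // input are 32 coefficients apart, hence the offsets of 32.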
+ in[0] = load_input_data(input);
+ in[1] = load_input_data(input + 32);
+ in[2] = load_input_data(input + 64);
+ in[3] = load_input_data(input + 96);
+ in[4] = load_input_data(input + 128);
+ in[5] = load_input_data(input + 160);
+ in[6] = load_input_data(input + 192);
+ in[7] = load_input_data(input + 224);
+
+ array_transpose_8x8(in, in);
+ idct32_34(in, stp1);
+
+  // 1-D: Store the 32 intermediate results for each 8x32 block.
+ col[0] = _mm_add_epi16(stp1[0], stp1[31]);
+ col[1] = _mm_add_epi16(stp1[1], stp1[30]);
+ col[2] = _mm_add_epi16(stp1[2], stp1[29]);
+ col[3] = _mm_add_epi16(stp1[3], stp1[28]);
+ col[4] = _mm_add_epi16(stp1[4], stp1[27]);
+ col[5] = _mm_add_epi16(stp1[5], stp1[26]);
+ col[6] = _mm_add_epi16(stp1[6], stp1[25]);
+ col[7] = _mm_add_epi16(stp1[7], stp1[24]);
+ col[8] = _mm_add_epi16(stp1[8], stp1[23]);
+ col[9] = _mm_add_epi16(stp1[9], stp1[22]);
+ col[10] = _mm_add_epi16(stp1[10], stp1[21]);
+ col[11] = _mm_add_epi16(stp1[11], stp1[20]);
+ col[12] = _mm_add_epi16(stp1[12], stp1[19]);
+ col[13] = _mm_add_epi16(stp1[13], stp1[18]);
+ col[14] = _mm_add_epi16(stp1[14], stp1[17]);
+ col[15] = _mm_add_epi16(stp1[15], stp1[16]);
+ col[16] = _mm_sub_epi16(stp1[15], stp1[16]);
+ col[17] = _mm_sub_epi16(stp1[14], stp1[17]);
+ col[18] = _mm_sub_epi16(stp1[13], stp1[18]);
+ col[19] = _mm_sub_epi16(stp1[12], stp1[19]);
+ col[20] = _mm_sub_epi16(stp1[11], stp1[20]);
+ col[21] = _mm_sub_epi16(stp1[10], stp1[21]);
+ col[22] = _mm_sub_epi16(stp1[9], stp1[22]);
+ col[23] = _mm_sub_epi16(stp1[8], stp1[23]);
+ col[24] = _mm_sub_epi16(stp1[7], stp1[24]);
+ col[25] = _mm_sub_epi16(stp1[6], stp1[25]);
+ col[26] = _mm_sub_epi16(stp1[5], stp1[26]);
+ col[27] = _mm_sub_epi16(stp1[4], stp1[27]);
+ col[28] = _mm_sub_epi16(stp1[3], stp1[28]);
+ col[29] = _mm_sub_epi16(stp1[2], stp1[29]);
+ col[30] = _mm_sub_epi16(stp1[1], stp1[30]);
+ col[31] = _mm_sub_epi16(stp1[0], stp1[31]);
+ for (i = 0; i < 4; i++) {
+ int j;
+ // Transpose 32x8 block to 8x32 block
+ array_transpose_8x8(col + i * 8, in);
+ idct32_34(in, stp1);
+
+    // 2-D: Calculate the results and store them to the destination.
+ in[0] = _mm_add_epi16(stp1[0], stp1[31]);
+ in[1] = _mm_add_epi16(stp1[1], stp1[30]);
+ in[2] = _mm_add_epi16(stp1[2], stp1[29]);
+ in[3] = _mm_add_epi16(stp1[3], stp1[28]);
+ in[4] = _mm_add_epi16(stp1[4], stp1[27]);
+ in[5] = _mm_add_epi16(stp1[5], stp1[26]);
+ in[6] = _mm_add_epi16(stp1[6], stp1[25]);
+ in[7] = _mm_add_epi16(stp1[7], stp1[24]);
+ in[8] = _mm_add_epi16(stp1[8], stp1[23]);
+ in[9] = _mm_add_epi16(stp1[9], stp1[22]);
+ in[10] = _mm_add_epi16(stp1[10], stp1[21]);
+ in[11] = _mm_add_epi16(stp1[11], stp1[20]);
+ in[12] = _mm_add_epi16(stp1[12], stp1[19]);
+ in[13] = _mm_add_epi16(stp1[13], stp1[18]);
+ in[14] = _mm_add_epi16(stp1[14], stp1[17]);
+ in[15] = _mm_add_epi16(stp1[15], stp1[16]);
+ in[16] = _mm_sub_epi16(stp1[15], stp1[16]);
+ in[17] = _mm_sub_epi16(stp1[14], stp1[17]);
+ in[18] = _mm_sub_epi16(stp1[13], stp1[18]);
+ in[19] = _mm_sub_epi16(stp1[12], stp1[19]);
+ in[20] = _mm_sub_epi16(stp1[11], stp1[20]);
+ in[21] = _mm_sub_epi16(stp1[10], stp1[21]);
+ in[22] = _mm_sub_epi16(stp1[9], stp1[22]);
+ in[23] = _mm_sub_epi16(stp1[8], stp1[23]);
+ in[24] = _mm_sub_epi16(stp1[7], stp1[24]);
+ in[25] = _mm_sub_epi16(stp1[6], stp1[25]);
+ in[26] = _mm_sub_epi16(stp1[5], stp1[26]);
+ in[27] = _mm_sub_epi16(stp1[4], stp1[27]);
+ in[28] = _mm_sub_epi16(stp1[3], stp1[28]);
+ in[29] = _mm_sub_epi16(stp1[2], stp1[29]);
+ in[30] = _mm_sub_epi16(stp1[1], stp1[30]);
+ in[31] = _mm_sub_epi16(stp1[0], stp1[31]);
+
+ for (j = 0; j < 32; ++j) {
+      // Final rounding and shift: add 32, then arithmetic shift right by 6.
+ in[j] = _mm_adds_epi16(in[j], final_rounding);
+ in[j] = _mm_srai_epi16(in[j], 6);
+ RECON_AND_STORE(dest + j * stride, in[j]);
+ }
+
+ dest += 8;
+ }
+}
--- a/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm
+++ b/vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm
@@ -617,61 +617,6 @@
%define transposed_in 16*32*4
%define pass_one_start 16*32*0
%define stp r8
-
-INIT_XMM ssse3
-cglobal idct32x32_34_add, 3, 11, 16, i32x32_size, input, output, stride
- mova m8, [pd_8192]
- lea stp, [rsp + pass_one_start]
-
-idct32x32_34:
- mov r3, inputq
- lea r4, [rsp + transposed_in]
-
-idct32x32_34_transpose:
- LOAD_TRAN_LOW 0, r3, 0
- LOAD_TRAN_LOW 1, r3, 4
- LOAD_TRAN_LOW 2, r3, 8
- LOAD_TRAN_LOW 3, r3, 12
- LOAD_TRAN_LOW 4, r3, 16
- LOAD_TRAN_LOW 5, r3, 20
- LOAD_TRAN_LOW 6, r3, 24
- LOAD_TRAN_LOW 7, r3, 28
-
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9, 10
-
- IDCT32X32_34 16*0, 16*32, 16*64, 16*96
- lea stp, [stp + 16 * 8]
- mov r6, 4
- lea stp, [rsp + pass_one_start]
- lea r9, [rsp + pass_one_start]
-
-idct32x32_34_2:
- lea r4, [rsp + transposed_in]
- mov r3, r9
-
-idct32x32_34_transpose_2:
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 1]
- mova m2, [r3 + 16 * 2]
- mova m3, [r3 + 16 * 3]
- mova m4, [r3 + 16 * 4]
- mova m5, [r3 + 16 * 5]
- mova m6, [r3 + 16 * 6]
- mova m7, [r3 + 16 * 7]
-
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9, 10
-
- IDCT32X32_34 16*0, 16*8, 16*16, 16*24
-
- lea stp, [stp + 16 * 32]
- add r9, 16 * 32
- dec r6
- jnz idct32x32_34_2
-
- RECON_AND_STORE pass_two_start
-
- RET
-
%macro IDCT32X32_135 4
; BLOCK A STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mova m1, [rsp + transposed_in + 16 * 1]