ref: 8253a27904c5d6b681143ea9d96d171ce7a6a2c9
parent: b8a4b5dd8d2ae895ff08e88c2cd2b9b8c8bf17c5
author: Linfeng Zhang <[email protected]>
date: Fri Jun 23 10:27:18 EDT 2017
Add vpx_highbd_idct4x4_16_add_sse4_1()

BUG=webm:1412

Change-Id: Ie33482409351a01be4e89466b0441834eb1e905a
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -477,7 +477,9 @@
INSTANTIATE_TEST_CASE_P(C, PartialIDctTest,
::testing::ValuesIn(c_partial_idct_tests));
-#if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
+#if !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_NEON
const PartialInvTxfmParam neon_partial_idct_tests[] = {
#if CONFIG_VP9_HIGHBITDEPTH
make_tuple(&vpx_highbd_fdct32x32_c,
@@ -625,9 +627,9 @@
INSTANTIATE_TEST_CASE_P(NEON, PartialIDctTest,
::testing::ValuesIn(neon_partial_idct_tests));
-#endif // HAVE_NEON && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_NEON
-#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2
// 32x32_135_ is implemented using the 1024 version.
const PartialInvTxfmParam sse2_partial_idct_tests[] = {
#if CONFIG_VP9_HIGHBITDEPTH
@@ -734,9 +736,9 @@
INSTANTIATE_TEST_CASE_P(SSE2, PartialIDctTest,
::testing::ValuesIn(sse2_partial_idct_tests));
-#endif // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSE2
-#if HAVE_SSSE3 && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSSE3
const PartialInvTxfmParam ssse3_partial_idct_tests[] = {
make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_135_add_c>,
&wrapper<vpx_idct32x32_135_add_ssse3>, TX_32X32, 135, 8, 1),
@@ -748,9 +750,28 @@
INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest,
::testing::ValuesIn(ssse3_partial_idct_tests));
-#endif // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
+#endif // HAVE_SSSE3
-#if HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
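+// Each tuple is: forward transform, reference inverse, inverse under test,
+// transform size, nonzero coefficient count, bit depth, pixel size in bytes.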
+const PartialInvTxfmParam sse4_1_partial_idct_tests[] = {
+ make_tuple(
+ &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+ &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse4_1>, TX_4X4, 16, 8, 2),
+ make_tuple(
+ &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+ &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse4_1>, TX_4X4, 16, 10, 2),
+ make_tuple(
+ &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
+ &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse4_1>, TX_4X4, 16, 12, 2)
+};
+
+INSTANTIATE_TEST_CASE_P(SSE4_1, PartialIDctTest,
+ ::testing::ValuesIn(sse4_1_partial_idct_tests));
+#endif // HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
+
+#if HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
const PartialInvTxfmParam dspr2_partial_idct_tests[] = {
make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
&wrapper<vpx_idct32x32_1024_add_dspr2>, TX_32X32, 1024, 8, 1),
@@ -778,9 +799,9 @@
INSTANTIATE_TEST_CASE_P(DSPR2, PartialIDctTest,
::testing::ValuesIn(dspr2_partial_idct_tests));
-#endif // HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH
-#if HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH
// 32x32_135_ is implemented using the 1024 version.
const PartialInvTxfmParam msa_partial_idct_tests[] = {
make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
@@ -809,6 +830,8 @@
INSTANTIATE_TEST_CASE_P(MSA, PartialIDctTest,
::testing::ValuesIn(msa_partial_idct_tests));
-#endif // HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH
+
+#endif // !CONFIG_EMULATE_HARDWARE
} // namespace
--- a/vpx_dsp/vpx_dsp.mk
+++ b/vpx_dsp/vpx_dsp.mk
@@ -239,6 +239,7 @@
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_idct8x8_add_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_idct16x16_add_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_idct32x32_add_sse2.c
+DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_idct4x4_add_sse4.c
endif # !CONFIG_VP9_HIGHBITDEPTH
ifeq ($(HAVE_NEON_ASM),yes)
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -652,7 +652,7 @@
add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
- specialize qw/vpx_highbd_idct4x4_16_add neon sse2/;
+ specialize qw/vpx_highbd_idct4x4_16_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct8x8_64_add neon sse2/;
specialize qw/vpx_highbd_idct8x8_12_add neon sse2/;
specialize qw/vpx_highbd_idct16x16_256_add neon sse2/;
--- a/vpx_dsp/x86/highbd_idct4x4_add_sse2.c
+++ b/vpx_dsp/x86/highbd_idct4x4_add_sse2.c
@@ -12,7 +12,6 @@
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"
-#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE __m128i dct_const_round_shift_4_sse2(const __m128i in0,
const __m128i in1) {
@@ -22,16 +21,6 @@
return dct_const_round_shift_sse2(t2);
}
-static INLINE __m128i wraplow_16bit_sse2(const __m128i in0, const __m128i in1,
- const __m128i rounding) {
- __m128i temp[2];
- temp[0] = _mm_add_epi32(in0, rounding);
- temp[1] = _mm_add_epi32(in1, rounding);
- temp[0] = _mm_srai_epi32(temp[0], 4);
- temp[1] = _mm_srai_epi32(temp[1], 4);
- return _mm_packs_epi32(temp[0], temp[1]);
-}
-
static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
const __m128i cospi_p16_p16 = _mm_setr_epi32(cospi_16_64, 0, cospi_16_64, 0);
const __m128i cospi_p08_p08 = _mm_setr_epi32(cospi_8_64, 0, cospi_8_64, 0);
@@ -100,19 +89,6 @@
return _mm_sub_epi64(out, sign);
}
-static INLINE __m128i dct_const_round_shift_64bit_sse2(const __m128i in) {
- const __m128i t = _mm_add_epi64(
- in,
- _mm_setr_epi32(DCT_CONST_ROUNDING << 2, 0, DCT_CONST_ROUNDING << 2, 0));
- return _mm_srli_si128(t, 2);
-}
-
-static INLINE __m128i pack_4_sse2(const __m128i in0, const __m128i in1) {
- const __m128i t0 = _mm_unpacklo_epi32(in0, in1); // 0, 2
- const __m128i t1 = _mm_unpackhi_epi32(in0, in1); // 1, 3
- return _mm_unpacklo_epi32(t0, t1); // 0, 1, 2, 3
-}
-
static INLINE void highbd_idct4_large_sse2(__m128i *const io) {
const __m128i cospi_p16_p16 =
_mm_setr_epi32(cospi_16_64 << 2, 0, cospi_16_64 << 2, 0);
@@ -133,12 +109,12 @@
temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p16_p16);
temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p16_p16);
temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p16_p16);
- temp1[0] = dct_const_round_shift_64bit_sse2(temp1[0]);
- temp1[1] = dct_const_round_shift_64bit_sse2(temp1[1]);
- temp2[0] = dct_const_round_shift_64bit_sse2(temp2[0]);
- temp2[1] = dct_const_round_shift_64bit_sse2(temp2[1]);
- step[0] = pack_4_sse2(temp1[0], temp1[1]);
- step[1] = pack_4_sse2(temp2[0], temp2[1]);
+ temp1[0] = dct_const_round_shift_64bit(temp1[0]);
+ temp1[1] = dct_const_round_shift_64bit(temp1[1]);
+ temp2[0] = dct_const_round_shift_64bit(temp2[0]);
+ temp2[1] = dct_const_round_shift_64bit(temp2[1]);
+ step[0] = pack_4(temp1[0], temp1[1]);
+ step[1] = pack_4(temp2[0], temp2[1]);
abs_extend_64bit_sse2(io[1], temp1, sign1);
abs_extend_64bit_sse2(io[3], temp2, sign2);
@@ -154,12 +130,12 @@
temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]); // [1]*cospi_24 - [3]*cospi_8
temp2[0] = _mm_add_epi64(temp1[2], temp2[2]); // [1]*cospi_8 + [3]*cospi_24
temp2[1] = _mm_add_epi64(temp1[3], temp2[3]); // [1]*cospi_8 + [3]*cospi_24
- temp1[0] = dct_const_round_shift_64bit_sse2(temp1[0]);
- temp1[1] = dct_const_round_shift_64bit_sse2(temp1[1]);
- temp2[0] = dct_const_round_shift_64bit_sse2(temp2[0]);
- temp2[1] = dct_const_round_shift_64bit_sse2(temp2[1]);
- step[2] = pack_4_sse2(temp1[0], temp1[1]);
- step[3] = pack_4_sse2(temp2[0], temp2[1]);
+ temp1[0] = dct_const_round_shift_64bit(temp1[0]);
+ temp1[1] = dct_const_round_shift_64bit(temp1[1]);
+ temp2[0] = dct_const_round_shift_64bit(temp2[0]);
+ temp2[1] = dct_const_round_shift_64bit(temp2[1]);
+ step[2] = pack_4(temp1[0], temp1[1]);
+ step[3] = pack_4(temp2[0], temp2[1]);
// stage 2
io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
@@ -211,31 +187,11 @@
highbd_idct4_large_sse2(io);
highbd_idct4_large_sse2(io);
}
- io[0] = wraplow_16bit_sse2(io[0], io[1], _mm_set1_epi32(8));
- io[1] = wraplow_16bit_sse2(io[2], io[3], _mm_set1_epi32(8));
+ io[0] = wraplow_16bit(io[0], io[1], _mm_set1_epi32(8));
+ io[1] = wraplow_16bit(io[2], io[3], _mm_set1_epi32(8));
}
- // Reconstruction and Store
- {
- __m128i d0 = _mm_loadl_epi64((const __m128i *)dest);
- __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2));
- d0 = _mm_unpacklo_epi64(d0,
- _mm_loadl_epi64((const __m128i *)(dest + stride)));
- d2 = _mm_unpacklo_epi64(
- d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3)));
- d0 = clamp_high_sse2(_mm_adds_epi16(d0, io[0]), bd);
- d2 = clamp_high_sse2(_mm_adds_epi16(d2, io[1]), bd);
- // store input0
- _mm_storel_epi64((__m128i *)dest, d0);
- // store input1
- d0 = _mm_srli_si128(d0, 8);
- _mm_storel_epi64((__m128i *)(dest + stride), d0);
- // store input2
- _mm_storel_epi64((__m128i *)(dest + stride * 2), d2);
- // store input3
- d2 = _mm_srli_si128(d2, 8);
- _mm_storel_epi64((__m128i *)(dest + stride * 3), d2);
- }
+ recon_and_store_4(dest, io, stride, bd);
}
void vpx_highbd_idct4x4_1_add_sse2(const tran_low_t *input, uint16_t *dest,
--- /dev/null
+++ b/vpx_dsp/x86/highbd_idct4x4_add_sse4.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <smmintrin.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
+#include "vpx_dsp/x86/inv_txfm_sse2.h"
+#include "vpx_dsp/x86/transpose_sse2.h"
+
+static INLINE void extend_64bit(const __m128i in,
+ __m128i *const out /*out[2]*/) {
+ out[0] = _mm_unpacklo_epi32(in, in); // 0, 0, 1, 1
+ out[1] = _mm_unpackhi_epi32(in, in); // 2, 2, 3, 3
+}
+
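+// 4-point IDCT on 32-bit coefficients. SSE4.1 has the signed 32x32 -> 64
+// bit multiply _mm_mul_epi32, so unlike the SSE2 version there is no need
+// to take absolute values and reapply the signs around _mm_mul_epu32.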
+static INLINE void highbd_idct4(__m128i *const io) {
+ const __m128i cospi_p16_p16 =
+ _mm_setr_epi32(cospi_16_64 << 2, 0, cospi_16_64 << 2, 0);
+ const __m128i cospi_p08_p08 =
+ _mm_setr_epi32(cospi_8_64 << 2, 0, cospi_8_64 << 2, 0);
+ const __m128i cospi_p24_p24 =
+ _mm_setr_epi32(cospi_24_64 << 2, 0, cospi_24_64 << 2, 0);
+ __m128i temp1[4], temp2[4], step[4];
+
+ transpose_32bit_4x4(&io[0], &io[1], &io[2], &io[3]);
+
+ // stage 1
+ temp1[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2]
+ temp2[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2]
+ extend_64bit(temp1[0], temp1);
+ extend_64bit(temp2[0], temp2);
+ temp1[0] = _mm_mul_epi32(temp1[0], cospi_p16_p16);
+ temp1[1] = _mm_mul_epi32(temp1[1], cospi_p16_p16);
+ temp2[0] = _mm_mul_epi32(temp2[0], cospi_p16_p16);
+ temp2[1] = _mm_mul_epi32(temp2[1], cospi_p16_p16);
+ temp1[0] = dct_const_round_shift_64bit(temp1[0]);
+ temp1[1] = dct_const_round_shift_64bit(temp1[1]);
+ temp2[0] = dct_const_round_shift_64bit(temp2[0]);
+ temp2[1] = dct_const_round_shift_64bit(temp2[1]);
+ step[0] = pack_4(temp1[0], temp1[1]);
+ step[1] = pack_4(temp2[0], temp2[1]);
+
+ extend_64bit(io[1], temp1);
+ extend_64bit(io[3], temp2);
+ temp1[2] = _mm_mul_epi32(temp1[0], cospi_p08_p08);
+ temp1[3] = _mm_mul_epi32(temp1[1], cospi_p08_p08);
+ temp1[0] = _mm_mul_epi32(temp1[0], cospi_p24_p24);
+ temp1[1] = _mm_mul_epi32(temp1[1], cospi_p24_p24);
+ temp2[2] = _mm_mul_epi32(temp2[0], cospi_p24_p24);
+ temp2[3] = _mm_mul_epi32(temp2[1], cospi_p24_p24);
+ temp2[0] = _mm_mul_epi32(temp2[0], cospi_p08_p08);
+ temp2[1] = _mm_mul_epi32(temp2[1], cospi_p08_p08);
+ temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]); // [1]*cospi_24 - [3]*cospi_8
+ temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]); // [1]*cospi_24 - [3]*cospi_8
+ temp2[0] = _mm_add_epi64(temp1[2], temp2[2]); // [1]*cospi_8 + [3]*cospi_24
+ temp2[1] = _mm_add_epi64(temp1[3], temp2[3]); // [1]*cospi_8 + [3]*cospi_24
+ temp1[0] = dct_const_round_shift_64bit(temp1[0]);
+ temp1[1] = dct_const_round_shift_64bit(temp1[1]);
+ temp2[0] = dct_const_round_shift_64bit(temp2[0]);
+ temp2[1] = dct_const_round_shift_64bit(temp2[1]);
+ step[2] = pack_4(temp1[0], temp1[1]);
+ step[3] = pack_4(temp2[0], temp2[1]);
+
+ // stage 2
+ io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
+ io[1] = _mm_add_epi32(step[1], step[2]); // step[1] + step[2]
+ io[2] = _mm_sub_epi32(step[1], step[2]); // step[1] - step[2]
+ io[3] = _mm_sub_epi32(step[0], step[3]); // step[0] - step[3]
+}
+
+void vpx_highbd_idct4x4_16_add_sse4_1(const tran_low_t *input, uint16_t *dest,
+ int stride, int bd) {
+ __m128i io[4];
+
+ io[0] = _mm_load_si128((const __m128i *)(input + 0));
+ io[1] = _mm_load_si128((const __m128i *)(input + 4));
+ io[2] = _mm_load_si128((const __m128i *)(input + 8));
+ io[3] = _mm_load_si128((const __m128i *)(input + 12));
+
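+  // For bd == 8 the coefficients fit in 16 bits, so pack them and reuse
+  // the faster 16-bit idct4_sse2() for both the row and column transforms.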
+ if (bd == 8) {
+ __m128i io_short[2];
+
+ io_short[0] = _mm_packs_epi32(io[0], io[1]);
+ io_short[1] = _mm_packs_epi32(io[2], io[3]);
+ idct4_sse2(io_short);
+ idct4_sse2(io_short);
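+    // Final rounding: (x + 8) >> 4.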
+ io_short[0] = _mm_add_epi16(io_short[0], _mm_set1_epi16(8));
+ io_short[1] = _mm_add_epi16(io_short[1], _mm_set1_epi16(8));
+ io[0] = _mm_srai_epi16(io_short[0], 4);
+ io[1] = _mm_srai_epi16(io_short[1], 4);
+ } else {
+ highbd_idct4(io);
+ highbd_idct4(io);
+ io[0] = wraplow_16bit(io[0], io[1], _mm_set1_epi32(8));
+ io[1] = wraplow_16bit(io[2], io[3], _mm_set1_epi32(8));
+ }
+
+ recon_and_store_4(dest, io, stride, bd);
+}
--- a/vpx_dsp/x86/highbd_inv_txfm_sse2.h
+++ b/vpx_dsp/x86/highbd_inv_txfm_sse2.h
@@ -17,6 +17,36 @@
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
+static INLINE __m128i wraplow_16bit(const __m128i in0, const __m128i in1,
+ const __m128i rounding) {
+ __m128i temp[2];
+ temp[0] = _mm_add_epi32(in0, rounding);
+ temp[1] = _mm_add_epi32(in1, rounding);
+ temp[0] = _mm_srai_epi32(temp[0], 4);
+ temp[1] = _mm_srai_epi32(temp[1], 4);
+ return _mm_packs_epi32(temp[0], temp[1]);
+}
+
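+// Round and shift right by 2 + DCT_CONST_BITS. The multiplier constants
+// were pre-shifted left by 2, and the 2-byte shift leaves the wanted bits
+// in the low 32 bits of each 64-bit lane, where pack_4() picks them up.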
+static INLINE __m128i dct_const_round_shift_64bit(const __m128i in) {
+ const __m128i t = _mm_add_epi64(
+ in,
+ _mm_setr_epi32(DCT_CONST_ROUNDING << 2, 0, DCT_CONST_ROUNDING << 2, 0));
+ return _mm_srli_si128(t, 2);
+}
+
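+// in0 holds 32-bit results 0 and 1 in its even lanes, in1 holds results
+// 2 and 3; return them packed into one register in order 0, 1, 2, 3.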
+static INLINE __m128i pack_4(const __m128i in0, const __m128i in1) {
+ const __m128i t0 = _mm_unpacklo_epi32(in0, in1); // 0, 2
+ const __m128i t1 = _mm_unpackhi_epi32(in0, in1); // 1, 3
+ return _mm_unpacklo_epi32(t0, t1); // 0, 1, 2, 3
+}
+
static INLINE __m128i add_dc_clamp(const __m128i *const min,
const __m128i *const max,
const __m128i *const dc,
@@ -65,6 +95,27 @@
retval = _mm_or_si128(retval, ubounded);
retval = _mm_and_si128(retval, _mm_cmpgt_epi16(retval, zero));
return retval;
+}
+
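+// Add the 4x4 IDCT output (rows 0 and 1 in io[0], rows 2 and 3 in io[1])
+// to four rows of dest, clamp each pixel to [0, (1 << bd) - 1] and store.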
+static INLINE void recon_and_store_4(uint16_t *const dest,
+ const __m128i *const io, const int stride,
+ int bd) {
+ __m128i d0 = _mm_loadl_epi64((const __m128i *)dest);
+ __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2));
+ d0 =
+ _mm_unpacklo_epi64(d0, _mm_loadl_epi64((const __m128i *)(dest + stride)));
+ d2 = _mm_unpacklo_epi64(
+ d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3)));
+ d0 = clamp_high_sse2(_mm_adds_epi16(d0, io[0]), bd);
+ d2 = clamp_high_sse2(_mm_adds_epi16(d2, io[1]), bd);
+ _mm_storel_epi64((__m128i *)dest, d0);
+ d0 = _mm_srli_si128(d0, 8);
+ _mm_storel_epi64((__m128i *)(dest + stride), d0);
+ _mm_storel_epi64((__m128i *)(dest + stride * 2), d2);
+ d2 = _mm_srli_si128(d2, 8);
+ _mm_storel_epi64((__m128i *)(dest + stride * 3), d2);
}
#endif // VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_