ref: bc86e2c6a25ad3e7efb02875542f1525db4bea42
parent: c39cd9235ece2e55f0cc84fb0f9f5e698146ee9c
author: Scott LaVarnway <[email protected]>
date: Tue Sep 12 14:01:31 EDT 2017
vpxdsp: [x86] add highbd_d63_predictor functions

C vs SSE2 speed gains:
_4x4 : ~2.94x

C vs SSSE3 speed gains:
_8x8 : ~8.69x
_16x16 : ~6.32x
_32x32 : ~5.33x

BUG=webm:1411

Change-Id: I2c35b527eac2229f17aaa9d118fb601e7195efe4
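
For context, d63 prediction fills the block from the above row only: even rows hold the 2-tap rounded averages of neighbouring above samples, odd rows hold the 3-tap averages, and each pair of rows advances the sampling window one pixel to the right. A minimal scalar sketch of that pattern (illustrative only; the exact right-edge handling, i.e. how far past the block the above row is read or replicated, follows the existing C reference and the kernels below, not this sketch):

#include <stddef.h>
#include <stdint.h>

#define AVG2(a, b) (((a) + (b) + 1) >> 1)
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

/* Illustrative d63 fill for a bs x bs block; right-edge clamping omitted. */
static void d63_sketch(uint16_t *dst, ptrdiff_t stride, int bs,
                       const uint16_t *above) {
  int r, c;
  for (r = 0; r < bs; ++r) {
    const int base = r >> 1; /* window moves right one pixel every two rows */
    for (c = 0; c < bs; ++c) {
      dst[r * stride + c] =
          (r & 1) ? AVG3(above[base + c], above[base + c + 1],
                         above[base + c + 2])
                  : AVG2(above[base + c], above[base + c + 1]);
    }
  }
}

The SIMD versions below compute the avg2/avg3 rows once and then realize the per-row shift with _mm_srli_si128/_mm_alignr_epi8.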
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -485,7 +485,8 @@
vpx_highbd_dc_left_predictor_4x4_sse2, vpx_highbd_dc_top_predictor_4x4_sse2,
vpx_highbd_dc_128_predictor_4x4_sse2, vpx_highbd_v_predictor_4x4_sse2,
vpx_highbd_h_predictor_4x4_sse2, NULL, NULL, NULL, NULL,
- vpx_highbd_d207_predictor_4x4_sse2, NULL, vpx_highbd_tm_predictor_4x4_c)
+ vpx_highbd_d207_predictor_4x4_sse2, vpx_highbd_d63_predictor_4x4_sse2,
+ vpx_highbd_tm_predictor_4x4_c)
HIGHBD_INTRA_PRED_TEST(SSE2, TestHighbdIntraPred8,
vpx_highbd_dc_predictor_8x8_sse2,
@@ -520,13 +521,16 @@
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
HIGHBD_INTRA_PRED_TEST(SSSE3, TestHighbdIntraPred8, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL,
- vpx_highbd_d207_predictor_8x8_ssse3, NULL, NULL)
+ vpx_highbd_d207_predictor_8x8_ssse3,
+ vpx_highbd_d63_predictor_8x8_ssse3, NULL)
HIGHBD_INTRA_PRED_TEST(SSSE3, TestHighbdIntraPred16, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL,
- vpx_highbd_d207_predictor_16x16_ssse3, NULL, NULL)
+ vpx_highbd_d207_predictor_16x16_ssse3,
+ vpx_highbd_d63_predictor_16x16_ssse3, NULL)
HIGHBD_INTRA_PRED_TEST(SSSE3, TestHighbdIntraPred32, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL,
- vpx_highbd_d207_predictor_32x32_ssse3, NULL, NULL)
+ vpx_highbd_d207_predictor_32x32_ssse3,
+ vpx_highbd_d63_predictor_32x32_ssse3, NULL)
#endif // HAVE_SSSE3
#if HAVE_NEON
--- a/test/vp9_intrapred_test.cc
+++ b/test/vp9_intrapred_test.cc
@@ -471,6 +471,12 @@
INSTANTIATE_TEST_CASE_P(
SSSE3_TO_C_8, VP9HighbdIntraPredTest,
::testing::Values(
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_ssse3,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_ssse3,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_ssse3,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 8),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_8x8_ssse3,
&vpx_highbd_d207_predictor_8x8_c, 8, 8),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_16x16_ssse3,
@@ -481,6 +487,12 @@
INSTANTIATE_TEST_CASE_P(
SSSE3_TO_C_10, VP9HighbdIntraPredTest,
::testing::Values(
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_ssse3,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_ssse3,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_ssse3,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 10),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_8x8_ssse3,
&vpx_highbd_d207_predictor_8x8_c, 8, 10),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_16x16_ssse3,
@@ -491,6 +503,12 @@
INSTANTIATE_TEST_CASE_P(
SSSE3_TO_C_12, VP9HighbdIntraPredTest,
::testing::Values(
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_8x8_ssse3,
+ &vpx_highbd_d63_predictor_8x8_c, 8, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_16x16_ssse3,
+ &vpx_highbd_d63_predictor_16x16_c, 16, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_32x32_ssse3,
+ &vpx_highbd_d63_predictor_32x32_c, 32, 12),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_8x8_ssse3,
&vpx_highbd_d207_predictor_8x8_c, 8, 12),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_16x16_ssse3,
@@ -511,6 +529,8 @@
&vpx_highbd_dc_128_predictor_16x16_c, 16, 8),
HighbdIntraPredParam(&vpx_highbd_dc_128_predictor_32x32_sse2,
&vpx_highbd_dc_128_predictor_32x32_c, 32, 8),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_sse2,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 8),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_4x4_sse2,
&vpx_highbd_d207_predictor_4x4_c, 4, 8),
HighbdIntraPredParam(&vpx_highbd_dc_left_predictor_4x4_sse2,
@@ -573,6 +593,8 @@
&vpx_highbd_dc_128_predictor_16x16_c, 16, 10),
HighbdIntraPredParam(&vpx_highbd_dc_128_predictor_32x32_sse2,
&vpx_highbd_dc_128_predictor_32x32_c, 32, 10),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_sse2,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 10),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_4x4_sse2,
&vpx_highbd_d207_predictor_4x4_c, 4, 10),
HighbdIntraPredParam(&vpx_highbd_dc_left_predictor_4x4_sse2,
@@ -635,6 +657,8 @@
&vpx_highbd_dc_128_predictor_16x16_c, 16, 12),
HighbdIntraPredParam(&vpx_highbd_dc_128_predictor_32x32_sse2,
&vpx_highbd_dc_128_predictor_32x32_c, 32, 12),
+ HighbdIntraPredParam(&vpx_highbd_d63_predictor_4x4_sse2,
+ &vpx_highbd_d63_predictor_4x4_c, 4, 12),
HighbdIntraPredParam(&vpx_highbd_d207_predictor_4x4_sse2,
&vpx_highbd_d207_predictor_4x4_c, 4, 12),
HighbdIntraPredParam(&vpx_highbd_dc_left_predictor_4x4_sse2,
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -195,6 +195,7 @@
specialize qw/vpx_highbd_d45_predictor_4x4 neon/;
add_proto qw/void vpx_highbd_d63_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/vpx_highbd_d63_predictor_4x4 sse2/;
add_proto qw/void vpx_highbd_h_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_4x4 neon sse2/;
@@ -231,6 +232,7 @@
specialize qw/vpx_highbd_d45_predictor_8x8 neon/;
add_proto qw/void vpx_highbd_d63_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/vpx_highbd_d63_predictor_8x8 ssse3/;
add_proto qw/void vpx_highbd_h_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_8x8 neon sse2/;
@@ -267,6 +269,7 @@
specialize qw/vpx_highbd_d45_predictor_16x16 neon/;
add_proto qw/void vpx_highbd_d63_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/vpx_highbd_d63_predictor_16x16 ssse3/;
add_proto qw/void vpx_highbd_h_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_16x16 neon sse2/;
@@ -303,6 +306,7 @@
specialize qw/vpx_highbd_d45_predictor_32x32 neon/;
add_proto qw/void vpx_highbd_d63_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
+ specialize qw/vpx_highbd_d63_predictor_32x32 ssse3/;
add_proto qw/void vpx_highbd_h_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
specialize qw/vpx_highbd_h_predictor_32x32 neon sse2/;
--- a/vpx_dsp/x86/highbd_intrapred_intrin_sse2.c
+++ b/vpx_dsp/x86/highbd_intrapred_intrin_sse2.c
@@ -417,3 +417,26 @@
dst += stride;
_mm_storel_epi64((__m128i *)dst, row3);
}
+
+void vpx_highbd_d63_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
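+ // Rows 0/1 are the 2-tap (avg2) and 3-tap (avg3) rounded averages of the
+ // above row; rows 2/3 reuse them advanced one pixel to the right. The
+ // existing avg3_epu16() helper produces the rounded 3-tap average
+ // (a + 2*b + c + 2) >> 2 per lane.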
+ const __m128i ABCDEFGH = _mm_loadu_si128((const __m128i *)above);
+ const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 2);
+ const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 4);
+ const __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGH0, &CDEFGH00);
+ const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGH0);
+ const __m128i row0 = avg2;
+ const __m128i row1 = avg3;
+ const __m128i row2 = _mm_srli_si128(avg2, 2);
+ const __m128i row3 = _mm_srli_si128(avg3, 2);
+ (void)left;
+ (void)bd;
+ _mm_storel_epi64((__m128i *)dst, row0);
+ dst += stride;
+ _mm_storel_epi64((__m128i *)dst, row1);
+ dst += stride;
+ _mm_storel_epi64((__m128i *)dst, row2);
+ dst += stride;
+ _mm_storel_epi64((__m128i *)dst, row3);
+}
--- a/vpx_dsp/x86/highbd_intrapred_intrin_ssse3.c
+++ b/vpx_dsp/x86/highbd_intrapred_intrin_ssse3.c
@@ -179,3 +179,131 @@
d207_store_4x32(&dst, stride, &out_g, &out_h, &LR, &LR, &LR);
d207_store_4x32(&dst, stride, &out_h, &LR, &LR, &LR, &LR);
}
+
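+// Writes two rows (*a = 2-tap averages, *b = 3-tap averages), advances both
+// one pixel to the right by shifting in the replicated above-right sample
+// (*ar), then writes the next two rows.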
+static INLINE void d63_store_4x8(uint16_t **dst, const ptrdiff_t stride,
+ __m128i *a, __m128i *b, const __m128i *ar) {
+ _mm_store_si128((__m128i *)*dst, *a);
+ *dst += stride;
+ _mm_store_si128((__m128i *)*dst, *b);
+ *dst += stride;
+ *a = _mm_alignr_epi8(*ar, *a, 2);
+ *b = _mm_alignr_epi8(*ar, *b, 2);
+ _mm_store_si128((__m128i *)*dst, *a);
+ *dst += stride;
+ _mm_store_si128((__m128i *)*dst, *b);
+ *dst += stride;
+ *a = _mm_alignr_epi8(*ar, *a, 2);
+ *b = _mm_alignr_epi8(*ar, *b, 2);
+}
+
+void vpx_highbd_d63_predictor_8x8_ssse3(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
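+ // HHHHHHHH replicates the last above sample (H) so it can be shifted in as
+ // the avg2/avg3 rows advance past the right edge of the above row.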
+ const __m128i ABCDEFGH = _mm_load_si128((const __m128i *)above);
+ const __m128i ABCDHHHH = _mm_shufflehi_epi16(ABCDEFGH, 0xff);
+ const __m128i HHHHHHHH = _mm_unpackhi_epi64(ABCDHHHH, ABCDHHHH);
+ const __m128i BCDEFGHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 2);
+ const __m128i CDEFGHHH = _mm_alignr_epi8(HHHHHHHH, ABCDEFGH, 4);
+ __m128i avg3 = avg3_epu16(&ABCDEFGH, &BCDEFGHH, &CDEFGHHH);
+ __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGHH);
+ (void)left;
+ (void)bd;
+ d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);
+ d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);
+}
+
+void vpx_highbd_d63_predictor_16x16_ssse3(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ const __m128i A0 = _mm_load_si128((const __m128i *)above);
+ const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
+ const __m128i AR0 = _mm_shufflehi_epi16(A1, 0xff);
+ const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
+ const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
+ const __m128i B1 = _mm_alignr_epi8(AR, A1, 2);
+ const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
+ const __m128i C1 = _mm_alignr_epi8(AR, A1, 4);
+ __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
+ __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
+ __m128i avg2_0 = _mm_avg_epu16(A0, B0);
+ __m128i avg2_1 = _mm_avg_epu16(A1, B1);
+ int i;
+ (void)left;
+ (void)bd;
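+ // Each iteration emits one avg2 row and one avg3 row, then advances both
+ // vectors one pixel to the right, shifting in the replicated last above
+ // sample (AR).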
+ for (i = 0; i < 14; i += 2) {
+ _mm_store_si128((__m128i *)dst, avg2_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg2_1);
+ dst += stride;
+ _mm_store_si128((__m128i *)dst, avg3_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg3_1);
+ dst += stride;
+ avg2_0 = _mm_alignr_epi8(avg2_1, avg2_0, 2);
+ avg2_1 = _mm_alignr_epi8(AR, avg2_1, 2);
+ avg3_0 = _mm_alignr_epi8(avg3_1, avg3_0, 2);
+ avg3_1 = _mm_alignr_epi8(AR, avg3_1, 2);
+ }
+ _mm_store_si128((__m128i *)dst, avg2_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg2_1);
+ dst += stride;
+ _mm_store_si128((__m128i *)dst, avg3_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg3_1);
+}
+
+void vpx_highbd_d63_predictor_32x32_ssse3(uint16_t *dst, ptrdiff_t stride,
+ const uint16_t *above,
+ const uint16_t *left, int bd) {
+ const __m128i A0 = _mm_load_si128((const __m128i *)above);
+ const __m128i A1 = _mm_load_si128((const __m128i *)(above + 8));
+ const __m128i A2 = _mm_load_si128((const __m128i *)(above + 16));
+ const __m128i A3 = _mm_load_si128((const __m128i *)(above + 24));
+ const __m128i AR0 = _mm_shufflehi_epi16(A3, 0xff);
+ const __m128i AR = _mm_unpackhi_epi64(AR0, AR0);
+ const __m128i B0 = _mm_alignr_epi8(A1, A0, 2);
+ const __m128i B1 = _mm_alignr_epi8(A2, A1, 2);
+ const __m128i B2 = _mm_alignr_epi8(A3, A2, 2);
+ const __m128i B3 = _mm_alignr_epi8(AR, A3, 2);
+ const __m128i C0 = _mm_alignr_epi8(A1, A0, 4);
+ const __m128i C1 = _mm_alignr_epi8(A2, A1, 4);
+ const __m128i C2 = _mm_alignr_epi8(A3, A2, 4);
+ const __m128i C3 = _mm_alignr_epi8(AR, A3, 4);
+ __m128i avg3_0 = avg3_epu16(&A0, &B0, &C0);
+ __m128i avg3_1 = avg3_epu16(&A1, &B1, &C1);
+ __m128i avg3_2 = avg3_epu16(&A2, &B2, &C2);
+ __m128i avg3_3 = avg3_epu16(&A3, &B3, &C3);
+ __m128i avg2_0 = _mm_avg_epu16(A0, B0);
+ __m128i avg2_1 = _mm_avg_epu16(A1, B1);
+ __m128i avg2_2 = _mm_avg_epu16(A2, B2);
+ __m128i avg2_3 = _mm_avg_epu16(A3, B3);
+ int i;
+ (void)left;
+ (void)bd;
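+ // Same rolling one-pixel shift as the 16x16 version, carried across four
+ // registers per row.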
+ for (i = 0; i < 30; i += 2) {
+ _mm_store_si128((__m128i *)dst, avg2_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg2_1);
+ _mm_store_si128((__m128i *)(dst + 16), avg2_2);
+ _mm_store_si128((__m128i *)(dst + 24), avg2_3);
+ dst += stride;
+ _mm_store_si128((__m128i *)dst, avg3_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg3_1);
+ _mm_store_si128((__m128i *)(dst + 16), avg3_2);
+ _mm_store_si128((__m128i *)(dst + 24), avg3_3);
+ dst += stride;
+ avg2_0 = _mm_alignr_epi8(avg2_1, avg2_0, 2);
+ avg2_1 = _mm_alignr_epi8(avg2_2, avg2_1, 2);
+ avg2_2 = _mm_alignr_epi8(avg2_3, avg2_2, 2);
+ avg2_3 = _mm_alignr_epi8(AR, avg2_3, 2);
+ avg3_0 = _mm_alignr_epi8(avg3_1, avg3_0, 2);
+ avg3_1 = _mm_alignr_epi8(avg3_2, avg3_1, 2);
+ avg3_2 = _mm_alignr_epi8(avg3_3, avg3_2, 2);
+ avg3_3 = _mm_alignr_epi8(AR, avg3_3, 2);
+ }
+ _mm_store_si128((__m128i *)dst, avg2_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg2_1);
+ _mm_store_si128((__m128i *)(dst + 16), avg2_2);
+ _mm_store_si128((__m128i *)(dst + 24), avg2_3);
+ dst += stride;
+ _mm_store_si128((__m128i *)dst, avg3_0);
+ _mm_store_si128((__m128i *)(dst + 8), avg3_1);
+ _mm_store_si128((__m128i *)(dst + 16), avg3_2);
+ _mm_store_si128((__m128i *)(dst + 24), avg3_3);
+}