ref: c8f5a55df4267a1fd5fd7d47002ffe2dcb02702e
parent: 7b0e12934e75927e48df9326d3a71dbf844ee690
author: Luca Barbato <[email protected]>
date: Fri Apr 7 10:49:00 EDT 2017
ppc: tm predictor 8x8

About 5x faster.

Change-Id: I951230517f49c0dca9ac9eac2efa8916a303b85a
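The TrueMotion (TM) predictor fills each output pixel with left[r] + above[c] - above[-1], clamped to [0, 255]. As a reference for what the VSX kernel below has to match, here is a minimal scalar sketch of the 8x8 case (helper names are hypothetical; the generic in-tree version lives in vpx_dsp/intrapred.c):

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t clip_u8(int v) {
      return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* Scalar 8x8 TrueMotion prediction:
       dst[r][c] = clip(left[r] + above[c] - above[-1]). */
    static void tm_predictor_8x8_ref(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
      const int tl = above[-1]; /* top-left neighbour */
      int r, c;
      for (r = 0; r < 8; ++r) {
        for (c = 0; c < 8; ++c) dst[c] = clip_u8(left[r] + above[c] - tl);
        dst += stride;
      }
    }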
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -313,6 +313,9 @@
#endif // HAVE_MSA

#if HAVE_VSX
+INTRA_PRED_TEST(VSX, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, vpx_tm_predictor_8x8_vsx)
+
INTRA_PRED_TEST(VSX, TestIntraPred16, NULL, NULL, NULL, NULL,
vpx_v_predictor_16x16_vsx, vpx_h_predictor_16x16_vsx, NULL,
NULL, NULL, NULL, NULL, NULL, NULL)
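(Each INTRA_PRED_TEST argument after the test name is one predictor slot, with tm last; the NULL entries are the 8x8 predictors that have no VSX implementation yet, so only the new tm kernel gets timed here.)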
--- a/vpx_dsp/ppc/intrapred_vsx.c
+++ b/vpx_dsp/ppc/intrapred_vsx.c
@@ -179,3 +179,55 @@
H_PREDICTOR_32(v14_1);
H_PREDICTOR_32(v15_1);
}
+
+void vpx_tm_predictor_8x8_vsx(uint8_t *dst, ptrdiff_t stride,
+                              const uint8_t *above, const uint8_t *left) {
+  /* Widen the neighbours to 16 bits so left[r] + above[c] - above[-1]
+     cannot overflow; above[-1] is the top-left pixel, splatted to all lanes. */
+  const int16x8_t tl = unpack_to_s16_h(vec_splat(vec_vsx_ld(-1, above), 0));
+  const int16x8_t l = unpack_to_s16_h(vec_vsx_ld(0, left));
+  const int16x8_t a = unpack_to_s16_h(vec_vsx_ld(0, above));
+  int16x8_t tmp, val;
+
+  /* Each store writes 16 bytes but the block is only 8 wide, so reload the
+     right half of the row (tmp) and pack it back unchanged.  vec_packsu
+     saturates to [0, 255], which is exactly the clamp TM prediction needs. */
+  tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+  val = vec_sub(vec_add(vec_splat(l, 0), a), tl);
+  vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 1), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 2), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 3), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 4), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 5), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 6), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+ dst += stride;
+
+ tmp = unpack_to_s16_l(vec_vsx_ld(0, dst));
+ val = vec_sub(vec_add(vec_splat(l, 7), a), tl);
+ vec_vsx_st(vec_packsu(val, tmp), 0, dst);
+}
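The eight unrolled row blocks all do the same thing: splat left[r] across a 16-bit vector, add the widened above row, subtract the splatted top-left pixel, and let vec_packsu's unsigned saturation perform the clamp. A throwaway harness along these lines (hypothetical test code, not part of the patch) could check the kernel against the scalar sketch above on a VSX-capable machine; the edge buffers are padded because vec_vsx_ld always reads a full 16 bytes:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    void vpx_tm_predictor_8x8_vsx(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *above, const uint8_t *left);
    /* The scalar sketch shown after the commit message. */
    void tm_predictor_8x8_ref(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left);

    int main(void) {
      uint8_t edge[17], left[16], got[16 * 8], want[16 * 8];
      int t, i;
      for (t = 0; t < 1000; ++t) {
        for (i = 0; i < 17; ++i) edge[i] = rand() & 255; /* edge[0] = above[-1] */
        for (i = 0; i < 16; ++i) left[i] = rand() & 255; /* 16-byte load needs padding */
        for (i = 0; i < 16 * 8; ++i) got[i] = want[i] = rand() & 255;
        vpx_tm_predictor_8x8_vsx(got, 16, edge + 1, left);
        tm_predictor_8x8_ref(want, 16, edge + 1, left);
        if (memcmp(got, want, sizeof got)) { puts("mismatch"); return 1; }
      }
      puts("ok");
      return 0;
    }

Since both surfaces start out identical and the scalar reference never touches the eight bytes to the right of the block, the full-surface memcmp also verifies that the 16-byte stores really pack those bytes back unchanged.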
--- a/vpx_dsp/ppc/types_vsx.h
+++ b/vpx_dsp/ppc/types_vsx.h
@@ -20,4 +20,24 @@
typedef vector signed int int32x4_t;
typedef vector unsigned int uint32x4_t;

+#ifdef WORDS_BIGENDIAN
+#define unpack_to_u16_h(v) \
+ (uint16x8_t) vec_mergeh(vec_splat_u8(0), (uint8x16_t)v)
+#define unpack_to_u16_l(v) \
+ (uint16x8_t) vec_mergel(vec_splat_u8(0), (uint8x16_t)v)
+#define unpack_to_s16_h(v) \
+ (int16x8_t) vec_mergeh(vec_splat_u8(0), (uint8x16_t)v)
+#define unpack_to_s16_l(v) \
+ (int16x8_t) vec_mergel(vec_splat_u8(0), (uint8x16_t)v)
+#else
+#define unpack_to_u16_h(v) \
+ (uint16x8_t) vec_mergeh((uint8x16_t)v, vec_splat_u8(0))
+#define unpack_to_u16_l(v) \
+ (uint16x8_t) vec_mergel((uint8x16_t)v, vec_splat_u8(0))
+#define unpack_to_s16_h(v) \
+ (int16x8_t) vec_mergeh((uint8x16_t)v, vec_splat_u8(0))
+#define unpack_to_s16_l(v) \
+ (int16x8_t) vec_mergel((uint8x16_t)v, vec_splat_u8(0))
+#endif
+
#endif // VPX_DSP_PPC_TYPES_VSX_H_
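vec_mergeh and vec_mergel interleave the byte lanes of their two operands, so merging with a zero vector zero-extends eight of the sixteen bytes to 16-bit lanes; the WORDS_BIGENDIAN switch only picks the operand order that lands the zero in the high byte of each lane. Element-wise, the two variants behave roughly like this sketch (reference helpers, assuming <stdint.h>):

    #include <stdint.h>

    /* unpack_to_s16_h: zero-extend the first eight bytes of a
       16-byte vector into eight 16-bit lanes. */
    static void unpack_to_s16_h_ref(const uint8_t v[16], int16_t out[8]) {
      int i;
      for (i = 0; i < 8; ++i) out[i] = v[i];
    }

    /* unpack_to_s16_l: same, for the last eight bytes. */
    static void unpack_to_s16_l_ref(const uint8_t v[16], int16_t out[8]) {
      int i;
      for (i = 0; i < 8; ++i) out[i] = v[i + 8];
    }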
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -95,7 +95,7 @@
specialize qw/vpx_v_predictor_8x8 neon msa sse2/;

add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2/;
+specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2 vsx/;

add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa sse2/;
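For context, the specialize line feeds libvpx's run-time CPU detection (rtcd) generator: the generated header exposes vpx_tm_predictor_8x8 as a function pointer that is bound once at start-up. The generated dispatch has roughly this shape (a sketch, not the literal generated code; HAS_VSX and ppc_simd_caps() are the capability helpers from vpx_ports/ppc.h):

    #include <stddef.h>
    #include <stdint.h>
    #include "vpx_ports/ppc.h" /* HAS_VSX, ppc_simd_caps() */

    void vpx_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride,
                                const uint8_t *above, const uint8_t *left);
    void vpx_tm_predictor_8x8_vsx(uint8_t *dst, ptrdiff_t y_stride,
                                  const uint8_t *above, const uint8_t *left);

    /* Callers invoke the predictor through this pointer. */
    void (*vpx_tm_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride,
                                 const uint8_t *above, const uint8_t *left);

    static void setup_rtcd_internal(void) {
      const int flags = ppc_simd_caps(); /* runtime CPU feature bits */
      vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_c;
      if (flags & HAS_VSX) vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_vsx;
    }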