ref: d68d37872c82ab81da384a65ce17b684d4e2cc24
parent: f9d20e6df21296899143d832f1a8d7ed7985e395
author: Luca Barbato <[email protected]>
date: Fri Apr 7 20:39:24 EDT 2017
ppc: dc_128 predictor 32x32 6x faster. Change-Id: I1da8f51b4262871cb98f0aa03ccda41b0ac2b08b
--- a/test/test_intra_pred_speed.cc
+++ b/test/test_intra_pred_speed.cc
@@ -321,9 +321,10 @@
vpx_h_predictor_16x16_vsx, NULL, NULL, NULL, NULL, NULL, NULL,
vpx_tm_predictor_16x16_vsx)
-INTRA_PRED_TEST(VSX, TestIntraPred32, NULL, NULL, NULL, NULL,
- vpx_v_predictor_32x32_vsx, vpx_h_predictor_32x32_vsx, NULL,
- NULL, NULL, NULL, NULL, NULL, vpx_tm_predictor_32x32_vsx)
+INTRA_PRED_TEST(VSX, TestIntraPred32, NULL, NULL, NULL,
+ vpx_dc_128_predictor_32x32_vsx, vpx_v_predictor_32x32_vsx,
+ vpx_h_predictor_32x32_vsx, NULL, NULL, NULL, NULL, NULL, NULL,
+ vpx_tm_predictor_32x32_vsx)
#endif // HAVE_VSX
// -----------------------------------------------------------------------------
--- a/vpx_dsp/ppc/intrapred_vsx.c
+++ b/vpx_dsp/ppc/intrapred_vsx.c
@@ -379,3 +379,22 @@
dc_fill_predictor_16x16(dst, stride, v128);
}
+
+/* Fill a 32x32 pixel block at dst with the constant byte vector `val`,
+ * advancing one `stride` per row.  Each 32-byte row is written with two
+ * 16-byte VSX stores at byte offsets 0 and 16. */
+static INLINE void dc_fill_predictor_32x32(uint8_t *dst, const ptrdiff_t stride,
+ const uint8x16_t val) {
+ int i;
+
+ for (i = 0; i < 32; i++, dst += stride) {
+ vec_vsx_st(val, 0, dst);
+ vec_vsx_st(val, 16, dst);
+ }
+}
+
+/* DC_128 intra predictor, 32x32 (VSX): sets every pixel in the block to
+ * the constant 128.  The `above` and `left` neighbor arrays are part of
+ * the common predictor signature but are unused by the 128 variant. */
+void vpx_dc_128_predictor_32x32_vsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ /* Splat 128 into all 16 lanes as 1 << 7; vec_splat_u8 only takes a
+ * 5-bit immediate, so 128 cannot be splatted directly. */
+ const uint8x16_t v128 = vec_sl(vec_splat_u8(1), vec_splat_u8(7));
+ (void)above;
+ (void)left;
+
+ dc_fill_predictor_32x32(dst, stride, v128);
+}
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -183,7 +183,7 @@
specialize qw/vpx_dc_left_predictor_32x32 msa neon sse2/;
add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left";
-specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2/;
+specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2 vsx/;
# High bitdepth functions
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {