ref: f48532e271c89144a98b931b3cf5bb721b936a7f
parent: 0b15bf1e5444b7cfb473005217b1e4fdb007a51c
author: Alexandra Hájková <[email protected]>
date: Mon May 8 08:10:04 EDT 2017
ppc: Add vpx_sad64x32/64_vsx

Change-Id: I84e3705fa52f75cb91b2bab4abf5cc77585ee3e2
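For context, each vpx_sadWxH function returns the sum of absolute differences between a WxH source block and a reference block. A minimal scalar sketch of what the new 64-wide routines compute (the function name here is illustrative; the actual C fallback lives in vpx_dsp/sad.c):

#include <stdint.h>

/* Illustrative scalar 64xH SAD: sum |a - b| over a 64-pixel-wide block. */
static unsigned int sad64xh_ref(const uint8_t *a, int a_stride,
                                const uint8_t *b, int b_stride, int height) {
  unsigned int sad = 0;
  int x, y;
  for (y = 0; y < height; y++) {
    for (x = 0; x < 64; x++)
      sad += (a[x] > b[x]) ? (unsigned int)(a[x] - b[x])
                           : (unsigned int)(b[x] - a[x]);
    a += a_stride;
    b += b_stride;
  }
  return sad;
}

The VSX versions below vectorize this loop 16 bytes at a time, keeping four partial sums in the 32-bit lanes of a vector accumulator.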
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -924,6 +924,8 @@
// VSX functions
#if HAVE_VSX
const SadMxNParam vsx_tests[] = {
+ SadMxNParam(64, 64, &vpx_sad64x64_vsx),
+ SadMxNParam(64, 32, &vpx_sad64x32_vsx),
SadMxNParam(32, 64, &vpx_sad32x64_vsx),
SadMxNParam(32, 32, &vpx_sad32x32_vsx),
SadMxNParam(32, 16, &vpx_sad32x16_vsx),
--- a/vpx_dsp/ppc/sad_vsx.c
+++ b/vpx_dsp/ppc/sad_vsx.c
@@ -69,6 +69,29 @@
return sad[3] + sad[2] + sad[1] + sad[0]; \
}
+#define SAD64(height) \
+ unsigned int vpx_sad64x##height##_vsx(const uint8_t *a, int a_stride, \
+ const uint8_t *b, int b_stride) { \
+ int y; \
+ unsigned int sad[4]; \
+ uint8x16_t v_a, v_b; \
+ int16x8_t v_ah, v_al, v_bh, v_bl, v_absh, v_absl, v_subh, v_subl; \
+ int32x4_t v_sad = vec_splat_s32(0); \
+ \
+ for (y = 0; y < height; y++) { \
+ PROCESS16(0); \
+ PROCESS16(16); \
+ PROCESS16(32); \
+ PROCESS16(48); \
+ \
+ a += a_stride; \
+ b += b_stride; \
+ } \
+ vec_vsx_st((uint32x4_t)v_sad, 0, sad); \
+ \
+ return sad[3] + sad[2] + sad[1] + sad[0]; \
+ }
+
SAD16(8);
SAD16(16);
SAD16(32);
@@ -75,3 +98,5 @@
SAD32(16);
SAD32(32);
SAD32(64);
+SAD64(32);
+SAD64(64);
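The new SAD64 macro reuses the existing PROCESS16 helper, which is defined earlier in sad_vsx.c and is not shown in this hunk. Judging from the variables the macro declares, it has roughly the following shape; this is a sketch, not the verbatim helper, and the zero-extending unpack_to_s16_h/unpack_to_s16_l helpers are assumed to come from vpx_dsp/ppc/types_vsx.h.

/* Sketch of PROCESS16(offset): load 16 bytes of a and b at `offset`,
 * widen to signed 16-bit halves, take the differences and their absolute
 * values, then accumulate into the four 32-bit lanes of v_sad.
 * Expects v_a, v_b, v_ah, v_al, v_bh, v_bl, v_subh, v_subl, v_absh,
 * v_absl and v_sad to be declared by the enclosing function. */
#define PROCESS16(offset)           \
  v_a = vec_vsx_ld(offset, a);      \
  v_b = vec_vsx_ld(offset, b);      \
  v_ah = unpack_to_s16_h(v_a);      \
  v_al = unpack_to_s16_l(v_a);      \
  v_bh = unpack_to_s16_h(v_b);      \
  v_bl = unpack_to_s16_l(v_b);      \
  v_subh = vec_sub(v_ah, v_bh);     \
  v_subl = vec_sub(v_al, v_bl);     \
  v_absh = vec_abs(v_subh);         \
  v_absl = vec_abs(v_subl);         \
  v_sad = vec_sum4s(v_absh, v_sad); \
  v_sad = vec_sum4s(v_absl, v_sad)

Calling it four times per row, at offsets 0, 16, 32 and 48, covers the full 64-byte width of the block.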
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -696,10 +696,10 @@
# Single block SAD
#
add_proto qw/unsigned int vpx_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x64 avx2 neon msa sse2/;
+specialize qw/vpx_sad64x64 avx2 neon msa sse2 vsx/;
add_proto qw/unsigned int vpx_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x32 avx2 msa sse2/;
+specialize qw/vpx_sad64x32 avx2 msa sse2 vsx/;
add_proto qw/unsigned int vpx_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
specialize qw/vpx_sad32x64 avx2 msa sse2 vsx/;
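Adding vsx to the specialize lists is what makes the new functions reachable: the rtcd generator emits a function pointer per prototype and points it at the VSX version when runtime CPU detection reports VSX support. Roughly, the generated vpx_dsp_rtcd.h ends up with something like the sketch below; the exact output differs, and HAS_VSX is the PowerPC capability flag assumed here.

unsigned int vpx_sad64x64_c(const uint8_t *src_ptr, int src_stride,
                            const uint8_t *ref_ptr, int ref_stride);
unsigned int vpx_sad64x64_vsx(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride);
RTCD_EXTERN unsigned int (*vpx_sad64x64)(const uint8_t *src_ptr, int src_stride,
                                         const uint8_t *ref_ptr, int ref_stride);

/* ...and in setup_rtcd_internal(): default to C, upgrade when VSX is present. */
vpx_sad64x64 = vpx_sad64x64_c;
if (flags & HAS_VSX) vpx_sad64x64 = vpx_sad64x64_vsx;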