ref: 9b253f9f0a91ea4d00c40a70c274c746186302f6
parent: 2075af4b1666422d044ada2f85fe29f062f6ccd3
parent: d6423b316641ee55d1dda69f2e7fde8804405e4d
author: Johann Koenig <[email protected]>
date: Fri Jul 7 10:03:13 EDT 2017
Merge changes I7b36a57e,If2ab51e3,Ifc685a96

* changes:
  sad neon: macroize 8xN definitions
  sad neon: avg for 8x[4,8,16]
  sad neon: avg for 4x4 and 4x8
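The _avg kernels added below score a compound prediction: the reference
block is first combined with a contiguous second predictor by a rounding
halving add, (a + b + 1) >> 1, which is exactly what the NEON
vrhadd/vrhaddq instructions compute, and the SAD is then taken against
that average. As a minimal scalar sketch of the semantics (illustrative
only, not the library's reference implementation):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative scalar model of vpx_sadWxH_avg: SAD of src against the
     * rounding average of ref and a contiguous width*height predictor. */
    static unsigned int sad_avg_sketch(const uint8_t *src, int src_stride,
                                       const uint8_t *ref, int ref_stride,
                                       const uint8_t *second_pred,
                                       int width, int height) {
      unsigned int sad = 0;
      int r, c;
      for (r = 0; r < height; ++r) {
        for (c = 0; c < width; ++c) {
          const int avg = (ref[c] + second_pred[c] + 1) >> 1;
          sad += abs(src[c] - avg);
        }
        src += src_stride;
        ref += ref_stride;
        second_pred += width; /* predictor rows are packed back to back */
      }
      return sad;
    }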
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -657,6 +657,15 @@
};
INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests));

+const SadMxNAvgParam avg_neon_tests[] = {
+ SadMxNAvgParam(8, 16, &vpx_sad8x16_avg_neon),
+ SadMxNAvgParam(8, 8, &vpx_sad8x8_avg_neon),
+ SadMxNAvgParam(8, 4, &vpx_sad8x4_avg_neon),
+ SadMxNAvgParam(4, 8, &vpx_sad4x8_avg_neon),
+ SadMxNAvgParam(4, 4, &vpx_sad4x4_avg_neon),
+};
+INSTANTIATE_TEST_CASE_P(NEON, SADavgTest, ::testing::ValuesIn(avg_neon_tests));
+
const SadMxNx4Param x4d_neon_tests[] = {
SadMxNx4Param(64, 64, &vpx_sad64x64x4d_neon),
SadMxNx4Param(32, 32, &vpx_sad32x32x4d_neon),
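The parameter tables above bind each (width, height) pair to the new NEON
entry points, which SADavgTest then checks against the C reference. For
orientation, a hypothetical direct call, just to show how the signature is
used (strides in bytes; second_pred is a contiguous width x height block):

    #include <stdint.h>

    /* Prototype as declared via vpx_dsp_rtcd.h, repeated so the sketch is
     * self-contained. */
    unsigned int vpx_sad8x8_avg_neon(const uint8_t *src_ptr, int src_stride,
                                     const uint8_t *ref_ptr, int ref_stride,
                                     const uint8_t *second_pred);

    unsigned int example(const uint8_t *src, const uint8_t *ref,
                         const uint8_t *pred8x8 /* 64 contiguous bytes */) {
      /* 8x8 block read out of two frames with 64-byte row strides. */
      return vpx_sad8x8_avg_neon(src, 64, ref, 64, pred8x8);
    }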
--- a/vpx_dsp/arm/sad_neon.c
+++ b/vpx_dsp/arm/sad_neon.c
@@ -33,6 +33,18 @@
return horizontal_add_16x8(abs);
}

+uint32_t vpx_sad4x4_avg_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred) {
+ const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+ const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);
+ const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8);
+ uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(avg));
+ abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(avg));
+ return horizontal_add_16x8(abs);
+}
+
uint32_t vpx_sad4x8_neon(const uint8_t *src_ptr, int src_stride,
const uint8_t *ref_ptr, int ref_stride) {
int i;
@@ -49,6 +61,26 @@
return horizontal_add_16x8(abs);
}

+uint32_t vpx_sad4x8_avg_neon(const uint8_t *src_ptr, int src_stride,
+ const uint8_t *ref_ptr, int ref_stride,
+ const uint8_t *second_pred) {
+ int i;
+ uint16x8_t abs = vdupq_n_u16(0);
+ for (i = 0; i < 8; i += 4) {
+ const uint8x16_t src_u8 = load_unaligned_u8q(src_ptr, src_stride);
+ const uint8x16_t ref_u8 = load_unaligned_u8q(ref_ptr, ref_stride);
+ const uint8x16_t second_pred_u8 = vld1q_u8(second_pred);
+ const uint8x16_t avg = vrhaddq_u8(ref_u8, second_pred_u8);
+ src_ptr += 4 * src_stride;
+ ref_ptr += 4 * ref_stride;
+ second_pred += 16;
+ abs = vabal_u8(abs, vget_low_u8(src_u8), vget_low_u8(avg));
+ abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(avg));
+ }
+
+ return horizontal_add_16x8(abs);
+}
+
static INLINE uint16x8_t sad8x(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const int height) {
int i;
@@ -64,23 +96,43 @@
return abs;
}

-uint32_t vpx_sad8x4_neon(const uint8_t *src, int src_stride, const uint8_t *ref,
- int ref_stride) {
- const uint16x8_t abs = sad8x(src, src_stride, ref, ref_stride, 4);
- return horizontal_add_16x8(abs);
-}
+static INLINE uint16x8_t sad8x_avg(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ const uint8_t *c, const int height) {
+ int i;
+ uint16x8_t abs = vdupq_n_u16(0);

-uint32_t vpx_sad8x8_neon(const uint8_t *src, int src_stride, const uint8_t *ref,
- int ref_stride) {
- const uint16x8_t abs = sad8x(src, src_stride, ref, ref_stride, 8);
- return horizontal_add_16x8(abs);
+ for (i = 0; i < height; ++i) {
+ const uint8x8_t a_u8 = vld1_u8(a);
+ const uint8x8_t b_u8 = vld1_u8(b);
+ const uint8x8_t c_u8 = vld1_u8(c);
+ const uint8x8_t avg = vrhadd_u8(b_u8, c_u8);
+ a += a_stride;
+ b += b_stride;
+ c += 8;
+ abs = vabal_u8(abs, a_u8, avg);
+ }
+ return abs;
}

-uint32_t vpx_sad8x16_neon(const uint8_t *src, int src_stride,
- const uint8_t *ref, int ref_stride) {
- const uint16x8_t abs = sad8x(src, src_stride, ref, ref_stride, 16);
- return horizontal_add_16x8(abs);
-}
+#define sad8xN(n) \
+ uint32_t vpx_sad8x##n##_neon(const uint8_t *src, int src_stride, \
+ const uint8_t *ref, int ref_stride) { \
+ const uint16x8_t abs = sad8x(src, src_stride, ref, ref_stride, n); \
+ return horizontal_add_16x8(abs); \
+ } \
+ \
+ uint32_t vpx_sad8x##n##_avg_neon(const uint8_t *src, int src_stride, \
+ const uint8_t *ref, int ref_stride, \
+ const uint8_t *second_pred) { \
+ const uint16x8_t abs = \
+ sad8x_avg(src, src_stride, ref, ref_stride, second_pred, n); \
+ return horizontal_add_16x8(abs); \
+ }
+
+sad8xN(4);
+sad8xN(8);
+sad8xN(16);

static INLINE uint16x8_t sad16x(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
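The sad8xN macro stamps out both the plain and _avg 8-wide kernels for
each supported height. Note the uint16x8_t accumulator has ample headroom:
each lane sums at most height absolute differences of at most 255, i.e.
16 * 255 = 4080 for the tallest block here, far below UINT16_MAX. A
reduction equivalent in effect to horizontal_add_16x8 (the library has its
own helper; this sketch is only an illustration):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Fold a uint16x8_t SAD accumulator to a scalar: widen lanes pairwise,
     * then sum the two 64-bit halves. */
    static uint32_t horizontal_add_u16x8_sketch(const uint16x8_t v) {
      const uint32x4_t a = vpaddlq_u16(v); /* 8 x u16 -> 4 x u32 */
      const uint64x2_t b = vpaddlq_u32(a); /* 4 x u32 -> 2 x u64 */
      return (uint32_t)(vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1));
    }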
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -804,19 +804,19 @@
specialize qw/vpx_sad16x8_avg msa sse2 vsx/;

add_proto qw/unsigned int vpx_sad8x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x16_avg msa sse2/;
+specialize qw/vpx_sad8x16_avg neon msa sse2/;

add_proto qw/unsigned int vpx_sad8x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x8_avg msa sse2/;
+specialize qw/vpx_sad8x8_avg neon msa sse2/;

add_proto qw/unsigned int vpx_sad8x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x4_avg msa sse2/;
+specialize qw/vpx_sad8x4_avg neon msa sse2/;

add_proto qw/unsigned int vpx_sad4x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x8_avg msa sse2/;
+specialize qw/vpx_sad4x8_avg neon msa sse2/;

add_proto qw/unsigned int vpx_sad4x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x4_avg msa sse2/;
+specialize qw/vpx_sad4x4_avg neon msa sse2/;

#
# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
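Each specialize line registers the NEON kernel with the RTCD generator, so
the runtime dispatch prefers it on CPUs that report NEON support. Roughly
what that dispatch amounts to (a hand-written approximation; names other
than the vpx_sad8x16_avg_c/_neon functions are hypothetical, not the
generator's actual output):

    #include <stdint.h>

    typedef unsigned int (*sad_avg_fn)(const uint8_t *src_ptr, int src_stride,
                                       const uint8_t *ref_ptr, int ref_stride,
                                       const uint8_t *second_pred);

    extern unsigned int vpx_sad8x16_avg_c(const uint8_t *, int,
                                          const uint8_t *, int,
                                          const uint8_t *);
    extern unsigned int vpx_sad8x16_avg_neon(const uint8_t *, int,
                                             const uint8_t *, int,
                                             const uint8_t *);

    /* Function pointer the rest of the codebase calls through; defaults to
     * the C reference and is upgraded once CPU features are known. */
    static sad_avg_fn vpx_sad8x16_avg_dispatch = vpx_sad8x16_avg_c;

    static void setup_dispatch_sketch(int have_neon) {
      if (have_neon) vpx_sad8x16_avg_dispatch = vpx_sad8x16_avg_neon;
    }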