ref: 56d3f1573a86ae466e1284d637e5798a481519aa
parent: 817f68cdcff487aafa1106f4185cf2f569a74b9e
parent: 101981b736a7d5fbfef41a9785cac17070910e4b
author: Johann Koenig <[email protected]>
date: Tue Jul 18 16:42:39 EDT 2017
Merge changes I62c2e313,Ibd7a0337,I94e1d886

* changes:
  quantize test: test sse2 and avx optimizations
  quantize test: extend arrays
  quantize test: restrict and correct input
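
The "restrict and correct input" change is visible in each test body below: the
quantization tables are sized for eight int16_t lanes, the first two entries are
drawn from the ranges vp9_init_quantizer() can actually produce, and the AC entry
is replicated into lanes 2..7 so SIMD implementations that load whole registers
read initialized data. A minimal standalone sketch of that setup pattern (not part
of the patch; std::mt19937 stands in for the test's ACMRandom, and the bounds are
the ones quoted in the diff):

    #include <cstdint>
    #include <random>

    int main() {
      std::mt19937 rng(0);  // stand-in for ACMRandom::DeterministicSeed()
      // rand_range(n) mimics RandRange(): a uniform value in [0, n).
      auto rand_range = [&rng](int32_t n) {
        return static_cast<int16_t>(rng() % static_cast<uint32_t>(n));
      };

      // Eight lanes so 128-bit loads of int16_t stay inside the arrays.
      alignas(16) int16_t zbin[8], round[8], quant[8], quant_shift[8], dequant[8];

      for (int j = 0; j < 2; ++j) {
        zbin[j] = rand_range(1200);            // bounds as documented in the diff,
        round[j] = rand_range(914);            // derived from vp9_init_quantizer()
        quant[j] = rand_range(32704) - 32703;
        quant_shift[j] = rand_range(16384);
        dequant[j] = rand_range(1828);
      }
      // Only DC (index 0) and AC (index 1) are meaningful; copy the AC value into
      // the remaining lanes so vector code never reads uninitialized memory.
      for (int j = 2; j < 8; ++j) {
        zbin[j] = zbin[1];
        round[j] = round[1];
        quant[j] = quant[1];
        quant_shift[j] = quant_shift[1];
        dequant[j] = dequant[1];
      }
      return 0;
    }
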
--- a/test/vp9_quantize_test.cc
+++ b/test/vp9_quantize_test.cc
@@ -30,7 +30,6 @@
using libvpx_test::Buffer;
namespace {
-#if CONFIG_VP9_HIGHBITDEPTH
const int number_of_iterations = 100;
typedef void (*QuantizeFunc)(const tran_low_t *coeff, intptr_t count,
@@ -85,20 +84,19 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
Buffer<tran_low_t> coeff = Buffer<tran_low_t>(16, 16, 0, 16);
ASSERT_TRUE(coeff.Init());
- DECLARE_ALIGNED(16, int16_t, zbin_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, round_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[2]);
- // These will need to be aligned to 32 when avx code is tested.
- Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(16, 16, 0, 16);
+ DECLARE_ALIGNED(16, int16_t, zbin_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, round_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, dequant_ptr[8]);
+ Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(16, 16, 0, 32);
ASSERT_TRUE(qcoeff.Init());
- Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(16, 16, 0, 16);
+ Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(16, 16, 0, 32);
ASSERT_TRUE(dqcoeff.Init());
Buffer<tran_low_t> ref_qcoeff = Buffer<tran_low_t>(16, 16, 0);
ASSERT_TRUE(ref_qcoeff.Init());
Buffer<tran_low_t> ref_dqcoeff = Buffer<tran_low_t>(16, 16, 0);
ASSERT_TRUE(ref_dqcoeff.Init());
- DECLARE_ALIGNED(16, int16_t, dequant_ptr[2]);
uint16_t eob, ref_eob;
for (int i = 0; i < number_of_iterations; ++i) {
@@ -111,12 +109,28 @@
ref_eob = eob;
coeff.Set(&rnd, 0, max_value_);
for (int j = 0; j < 2; j++) {
- zbin_ptr[j] = rnd.RandRange(max_value_);
- round_ptr[j] = rnd.Rand16();
- quant_ptr[j] = rnd.Rand16();
- quant_shift_ptr[j] = rnd.Rand16();
- dequant_ptr[j] = rnd.Rand16();
+ // Values determined by deconstructing vp9_init_quantizer().
+ // zbin may be up to 1143 for 8 and 10 bit Y values, or 1200 for 12 bit Y
+ // values or U/V values of any bit depth. This is because y_delta is not
+ // factored into the vp9_ac_quant() call.
+ zbin_ptr[j] = rnd.RandRange(1200);
+ // round may be up to 685 for Y values or 914 for U/V.
+ round_ptr[j] = rnd.RandRange(914);
+ // quant ranges from -32703 to 1.
+ quant_ptr[j] = rnd.RandRange(32704) - 32703;
+ // quant_shift goes up to 1 << 16.
+ quant_shift_ptr[j] = rnd.RandRange(16384);
+ // dequant maxes out at 1828 for all cases.
+ dequant_ptr[j] = rnd.RandRange(1828);
}
+ for (int j = 2; j < 8; j++) {
+ zbin_ptr[j] = zbin_ptr[1];
+ round_ptr[j] = round_ptr[1];
+ quant_ptr[j] = quant_ptr[1];
+ quant_shift_ptr[j] = quant_shift_ptr[1];
+ dequant_ptr[j] = dequant_ptr[1];
+ }
+
ref_quantize_op_(
coeff.TopLeftPixel(), count, skip_block, zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, ref_qcoeff.TopLeftPixel(), ref_dqcoeff.TopLeftPixel(),
@@ -144,19 +158,19 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
Buffer<tran_low_t> coeff = Buffer<tran_low_t>(32, 32, 0, 16);
ASSERT_TRUE(coeff.Init());
- DECLARE_ALIGNED(16, int16_t, zbin_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, round_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[2]);
- Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(32, 32, 0, 16);
+ DECLARE_ALIGNED(16, int16_t, zbin_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, round_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, dequant_ptr[8]);
+ Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(32, 32, 0, 32);
ASSERT_TRUE(qcoeff.Init());
- Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(32, 32, 0, 16);
+ Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(32, 32, 0, 32);
ASSERT_TRUE(dqcoeff.Init());
Buffer<tran_low_t> ref_qcoeff = Buffer<tran_low_t>(32, 32, 0);
ASSERT_TRUE(ref_qcoeff.Init());
Buffer<tran_low_t> ref_dqcoeff = Buffer<tran_low_t>(32, 32, 0);
ASSERT_TRUE(ref_dqcoeff.Init());
- DECLARE_ALIGNED(16, int16_t, dequant_ptr[2]);
uint16_t eob, ref_eob;
for (int i = 0; i < number_of_iterations; ++i) {
@@ -169,12 +183,20 @@
ref_eob = eob;
coeff.Set(&rnd, 0, max_value_);
for (int j = 0; j < 2; j++) {
- zbin_ptr[j] = rnd.RandRange(max_value_);
- round_ptr[j] = rnd.Rand16();
- quant_ptr[j] = rnd.Rand16();
- quant_shift_ptr[j] = rnd.Rand16();
- dequant_ptr[j] = rnd.Rand16();
+ zbin_ptr[j] = rnd.RandRange(1200);
+ round_ptr[j] = rnd.RandRange(914);
+ quant_ptr[j] = rnd.RandRange(32704) - 32703;
+ quant_shift_ptr[j] = rnd.RandRange(16384);
+ dequant_ptr[j] = rnd.RandRange(1828);
}
+ for (int j = 2; j < 8; j++) {
+ zbin_ptr[j] = zbin_ptr[1];
+ round_ptr[j] = round_ptr[1];
+ quant_ptr[j] = quant_ptr[1];
+ quant_shift_ptr[j] = quant_shift_ptr[1];
+ dequant_ptr[j] = dequant_ptr[1];
+ }
+
ref_quantize_op_(
coeff.TopLeftPixel(), count, skip_block, zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, ref_qcoeff.TopLeftPixel(), ref_dqcoeff.TopLeftPixel(),
@@ -202,19 +224,19 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
Buffer<tran_low_t> coeff = Buffer<tran_low_t>(16, 16, 0, 16);
ASSERT_TRUE(coeff.Init());
- DECLARE_ALIGNED(16, int16_t, zbin_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, round_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[2]);
- Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(16, 16, 0, 16);
+ DECLARE_ALIGNED(16, int16_t, zbin_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, round_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, dequant_ptr[8]);
+ Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(16, 16, 0, 32);
ASSERT_TRUE(qcoeff.Init());
- Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(16, 16, 0, 16);
+ Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(16, 16, 0, 32);
ASSERT_TRUE(dqcoeff.Init());
Buffer<tran_low_t> ref_qcoeff = Buffer<tran_low_t>(16, 16, 0);
ASSERT_TRUE(ref_qcoeff.Init());
Buffer<tran_low_t> ref_dqcoeff = Buffer<tran_low_t>(16, 16, 0);
ASSERT_TRUE(ref_dqcoeff.Init());
- DECLARE_ALIGNED(16, int16_t, dequant_ptr[2]);
uint16_t eob, ref_eob;
for (int i = 0; i < number_of_iterations; ++i) {
@@ -230,12 +252,19 @@
coeff.TopLeftPixel()[rnd(count)] = rnd.RandRange(max_value_);
coeff.TopLeftPixel()[rnd(count)] = rnd.RandRange(max_value_);
for (int j = 0; j < 2; j++) {
- zbin_ptr[j] = rnd.RandRange(max_value_);
- round_ptr[j] = rnd.Rand16();
- quant_ptr[j] = rnd.Rand16();
- quant_shift_ptr[j] = rnd.Rand16();
- dequant_ptr[j] = rnd.Rand16();
+ zbin_ptr[j] = rnd.RandRange(1200);
+ round_ptr[j] = rnd.RandRange(914);
+ quant_ptr[j] = rnd.RandRange(32704) - 32703;
+ quant_shift_ptr[j] = rnd.RandRange(16384);
+ dequant_ptr[j] = rnd.RandRange(1828);
}
+ for (int j = 2; j < 8; j++) {
+ zbin_ptr[j] = zbin_ptr[1];
+ round_ptr[j] = round_ptr[1];
+ quant_ptr[j] = quant_ptr[1];
+ quant_shift_ptr[j] = quant_shift_ptr[1];
+ dequant_ptr[j] = dequant_ptr[1];
+ }
ref_quantize_op_(
coeff.TopLeftPixel(), count, skip_block, zbin_ptr, round_ptr, quant_ptr,
@@ -264,19 +293,19 @@
ACMRandom rnd(ACMRandom::DeterministicSeed());
Buffer<tran_low_t> coeff = Buffer<tran_low_t>(32, 32, 0, 16);
ASSERT_TRUE(coeff.Init());
- DECLARE_ALIGNED(16, int16_t, zbin_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, round_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_ptr[2]);
- DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[2]);
- Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(32, 32, 0, 16);
+ DECLARE_ALIGNED(16, int16_t, zbin_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, round_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, quant_shift_ptr[8]);
+ DECLARE_ALIGNED(16, int16_t, dequant_ptr[8]);
+ Buffer<tran_low_t> qcoeff = Buffer<tran_low_t>(32, 32, 0, 32);
ASSERT_TRUE(qcoeff.Init());
- Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(32, 32, 0, 16);
+ Buffer<tran_low_t> dqcoeff = Buffer<tran_low_t>(32, 32, 0, 32);
ASSERT_TRUE(dqcoeff.Init());
Buffer<tran_low_t> ref_qcoeff = Buffer<tran_low_t>(32, 32, 0);
ASSERT_TRUE(ref_qcoeff.Init());
Buffer<tran_low_t> ref_dqcoeff = Buffer<tran_low_t>(32, 32, 0);
ASSERT_TRUE(ref_dqcoeff.Init());
- DECLARE_ALIGNED(16, int16_t, dequant_ptr[2]);
uint16_t eob, ref_eob;
for (int i = 0; i < number_of_iterations; ++i) {
@@ -292,12 +321,19 @@
coeff.TopLeftPixel()[rnd(count)] = rnd.RandRange(max_value_);
coeff.TopLeftPixel()[rnd(count)] = rnd.RandRange(max_value_);
for (int j = 0; j < 2; j++) {
- zbin_ptr[j] = rnd.RandRange(max_value_);
- round_ptr[j] = rnd.Rand16();
- quant_ptr[j] = rnd.Rand16();
- quant_shift_ptr[j] = rnd.Rand16();
- dequant_ptr[j] = rnd.Rand16();
+ zbin_ptr[j] = rnd.RandRange(1200);
+ round_ptr[j] = rnd.RandRange(914);
+ quant_ptr[j] = rnd.RandRange(32704) - 32703;
+ quant_shift_ptr[j] = rnd.RandRange(16384);
+ dequant_ptr[j] = rnd.RandRange(1828);
}
+ for (int j = 2; j < 8; j++) {
+ zbin_ptr[j] = zbin_ptr[1];
+ round_ptr[j] = round_ptr[1];
+ quant_ptr[j] = quant_ptr[1];
+ quant_shift_ptr[j] = quant_shift_ptr[1];
+ dequant_ptr[j] = dequant_ptr[1];
+ }
ref_quantize_op_(
coeff.TopLeftPixel(), count, skip_block, zbin_ptr, round_ptr, quant_ptr,
@@ -324,9 +360,12 @@
using std::tr1::make_tuple;
#if HAVE_SSE2
+#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
SSE2, VP9QuantizeTest,
- ::testing::Values(make_tuple(&vpx_highbd_quantize_b_sse2,
+ ::testing::Values(make_tuple(&vpx_quantize_b_sse2, &vpx_highbd_quantize_b_c,
+ VPX_BITS_8),
+ make_tuple(&vpx_highbd_quantize_b_sse2,
&vpx_highbd_quantize_b_c, VPX_BITS_8),
make_tuple(&vpx_highbd_quantize_b_sse2,
&vpx_highbd_quantize_b_c, VPX_BITS_10),
@@ -340,6 +379,37 @@
&vpx_highbd_quantize_b_32x32_c, VPX_BITS_10),
make_tuple(&vpx_highbd_quantize_b_32x32_sse2,
&vpx_highbd_quantize_b_32x32_c, VPX_BITS_12)));
-#endif // HAVE_SSE2
+#else
+INSTANTIATE_TEST_CASE_P(SSE2, VP9QuantizeTest,
+ ::testing::Values(make_tuple(&vpx_quantize_b_sse2,
+ &vpx_quantize_b_c,
+ VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_SSE2
+
+// TODO(johannkoenig): SSSE3 optimizations do not yet pass these tests.
+#if HAVE_SSSE3 && ARCH_X86_64
+INSTANTIATE_TEST_CASE_P(DISABLED_SSSE3, VP9QuantizeTest,
+ ::testing::Values(make_tuple(&vpx_quantize_b_ssse3,
+ &vpx_quantize_b_c,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(
+ DISABLED_SSSE3, VP9Quantize32Test,
+ ::testing::Values(make_tuple(&vpx_quantize_b_32x32_ssse3,
+ &vpx_quantize_b_32x32_c, VPX_BITS_8)));
+#endif // HAVE_SSSE3 && ARCH_X86_64
+
+// TODO(johannkoenig): AVX optimizations do not yet pass the 32x32 test.
+#if HAVE_AVX && ARCH_X86_64
+INSTANTIATE_TEST_CASE_P(AVX, VP9QuantizeTest,
+ ::testing::Values(make_tuple(&vpx_quantize_b_avx,
+ &vpx_quantize_b_c,
+ VPX_BITS_8)));
+
+INSTANTIATE_TEST_CASE_P(DISABLED_AVX, VP9Quantize32Test,
+ ::testing::Values(make_tuple(&vpx_quantize_b_32x32_avx,
+ &vpx_quantize_b_32x32_c,
+ VPX_BITS_8)));
+#endif // HAVE_AVX && ARCH_X86_64
} // namespace
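
Note on the new instantiations: the SSSE3 cases and the AVX 32x32 case use
GoogleTest's DISABLED_ prefix, so they compile and register but are skipped by
default until the TODOs above are resolved; they can still be exercised locally
by running the test binary with --gtest_also_run_disabled_tests.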