shithub: libvpx

ref: e1f127c4b5a3b02b94006c81545233570dca8a7c
parent: 8894c766c6d9460d12e52d350e7084b143d6109b
author: Jerome Jiang <[email protected]>
date: Mon Aug 5 11:38:39 EDT 2019

Fix vp9_quantize_fp(_32x32)_neon for HBD

In high bitdepth builds, the Neon code could overflow the 16-bit range
because it used int16x8_t and vmulq_s16.
The C code always truncates out-of-range values.

Change-Id: I33a968b8d812e3c8477f3a61d84482758a3f8b21
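
For context, a minimal standalone scalar sketch (not part of the patch; the
qcoeff/dequant values below are made up for illustration) of why the old
vmulq_s16 path misbehaves when tran_low_t is 32 bits: a 16-bit product wraps,
while the widening multiply used by the new calculate_dqcoeff_and_store helper
keeps the full value that the high bitdepth C code stores.

/* Illustrative only: contrasts a 16-bit multiply, which wraps like the old
 * vmulq_s16 path, with the widened 32-bit product that the new
 * vmull_s16 + vst1q_s32 path stores in CONFIG_VP9_HIGHBITDEPTH builds.
 * The sample inputs are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int16_t qcoeff = 1200; /* assumed quantized coefficient magnitude */
  const int16_t dequant = 52;  /* assumed dequantizer step */

  /* Old Neon path: product narrowed to 16 bits, like vmulq_s16. */
  const int16_t dq16 = (int16_t)(qcoeff * dequant);

  /* New HBD path: widening multiply, like vmull_s16 stored via vst1q_s32. */
  const int32_t dq32 = (int32_t)qcoeff * dequant;

  printf("16-bit product: %d\n", dq16); /* 62400 wraps; does not fit int16 */
  printf("32-bit product: %d\n", dq32); /* 62400, matching the HBD C result */
  return 0;
}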

--- a/vp9/encoder/arm/neon/vp9_quantize_neon.c
+++ b/vp9/encoder/arm/neon/vp9_quantize_neon.c
@@ -26,6 +26,22 @@
 #include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_dsp/vpx_dsp_common.h"
 
+static INLINE void calculate_dqcoeff_and_store(const int16x8_t qcoeff,
+                                               const int16x8_t dequant,
+                                               tran_low_t *dqcoeff) {
+  const int32x4_t dqcoeff_0 =
+      vmull_s16(vget_low_s16(qcoeff), vget_low_s16(dequant));
+  const int32x4_t dqcoeff_1 =
+      vmull_s16(vget_high_s16(qcoeff), vget_high_s16(dequant));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+  vst1q_s32(dqcoeff, dqcoeff_0);
+  vst1q_s32(dqcoeff + 4, dqcoeff_1);
+#else
+  vst1q_s16(dqcoeff, vcombine_s16(vmovn_s32(dqcoeff_0), vmovn_s32(dqcoeff_1)));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+}
+
 void vp9_quantize_fp_neon(const tran_low_t *coeff_ptr, intptr_t count,
                           int skip_block, const int16_t *round_ptr,
                           const int16_t *quant_ptr, tran_low_t *qcoeff_ptr,
@@ -68,10 +84,9 @@
     const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
     const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
     const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
-    const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+    calculate_dqcoeff_and_store(v_qcoeff, v_dequant, dqcoeff_ptr);
     v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
     store_s16q_to_tran_low(qcoeff_ptr, v_qcoeff);
-    store_s16q_to_tran_low(dqcoeff_ptr, v_dqcoeff);
     v_round = vmovq_n_s16(round_ptr[1]);
     v_quant = vmovq_n_s16(quant_ptr[1]);
     v_dequant = vmovq_n_s16(dequant_ptr[1]);
@@ -94,10 +109,9 @@
     const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
     const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign);
     const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign);
-    const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant);
+    calculate_dqcoeff_and_store(v_qcoeff, v_dequant, dqcoeff_ptr + i);
     v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan);
     store_s16q_to_tran_low(qcoeff_ptr + i, v_qcoeff);
-    store_s16q_to_tran_low(dqcoeff_ptr + i, v_dqcoeff);
   }
 #ifdef __aarch64__
   *eob_ptr = vmaxvq_s16(v_eobmax_76543210);
@@ -150,7 +164,6 @@
 
   int16x8_t qcoeff = vqaddq_s16(coeff_abs, round);
   int32x4_t dqcoeff_0, dqcoeff_1;
-  int16x8_t dqcoeff;
   uint16x8_t eob_max;
   (void)scan;
   (void)count;
@@ -172,13 +185,17 @@
   // Add 1 if negative to round towards zero because the C uses division.
   dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
   dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
+#if CONFIG_VP9_HIGHBITDEPTH
+  vst1q_s32(dqcoeff_ptr, vshrq_n_s32(dqcoeff_0, 1));
+  vst1q_s32(dqcoeff_ptr + 4, vshrq_n_s32(dqcoeff_1, 1));
+#else
+  store_s16q_to_tran_low(dqcoeff_ptr, vcombine_s16(vshrn_n_s32(dqcoeff_0, 1),
+                                                   vshrn_n_s32(dqcoeff_1, 1)));
+#endif
 
-  dqcoeff = vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
-
   eob_max = vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan);
 
   store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
-  store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
 
   iscan += 8;
   coeff_ptr += 8;
@@ -204,7 +221,6 @@
 
       int16x8_t qcoeff = vqaddq_s16(coeff_abs, round);
       int32x4_t dqcoeff_0, dqcoeff_1;
-      int16x8_t dqcoeff;
 
       qcoeff = vqdmulhq_s16(qcoeff, quant);
       qcoeff = veorq_s16(qcoeff, coeff_sign);
@@ -217,14 +233,19 @@
       dqcoeff_0 = vaddq_s32(dqcoeff_0, extract_sign_bit(dqcoeff_0));
       dqcoeff_1 = vaddq_s32(dqcoeff_1, extract_sign_bit(dqcoeff_1));
 
-      dqcoeff =
-          vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1));
+#if CONFIG_VP9_HIGHBITDEPTH
+      vst1q_s32(dqcoeff_ptr, vshrq_n_s32(dqcoeff_0, 1));
+      vst1q_s32(dqcoeff_ptr + 4, vshrq_n_s32(dqcoeff_1, 1));
+#else
+      store_s16q_to_tran_low(
+          dqcoeff_ptr,
+          vcombine_s16(vshrn_n_s32(dqcoeff_0, 1), vshrn_n_s32(dqcoeff_1, 1)));
+#endif
 
       eob_max =
           vmaxq_u16(eob_max, vandq_u16(vtstq_s16(qcoeff, neg_one), v_iscan));
 
       store_s16q_to_tran_low(qcoeff_ptr, qcoeff);
-      store_s16q_to_tran_low(dqcoeff_ptr, dqcoeff);
 
       iscan += 8;
       coeff_ptr += 8;