shithub: libvpx

ref: dd07443f7253905a2b8804713d02bcc789de2466
parent: 7ed1d54ab4b75037e90c46135cfe01b7dc47fa4d
author: jackychen <[email protected]>
date: Fri Jun 24 06:42:00 EDT 2016

vp9: Code cleanup, move low temp var logic out of choose_partitioning.

Change-Id: I7093e74131e0964471c9993c1e972b4617c4731d

--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -729,6 +729,65 @@
 }
 #endif
 
+static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x,
+                                  MACROBLOCKD *xd, v64x64 *vt,
+                                  int force_split[], int64_t thresholds[],
+                                  MV_REFERENCE_FRAME ref_frame_partition,
+                                  int mi_col, int mi_row) {
+  int i, j;
+  VP9_COMMON *const cm = &cpi->common;
+  const int mv_thr = cm->width > 640 ? 8 : 4;
+  // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected
+  // and the int_pro mv is small. If the temporal variance is small, set the
+  // variance_low flag for the block. The variance threshold can be
+  // adjusted; higher values are more aggressive.
+  if (ref_frame_partition == LAST_FRAME &&
+      (cpi->sf.short_circuit_low_temp_var == 1 ||
+       (xd->mi[0]->mv[0].as_mv.col < mv_thr &&
+        xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
+        xd->mi[0]->mv[0].as_mv.row < mv_thr &&
+        xd->mi[0]->mv[0].as_mv.row > -mv_thr))) {
+    if (xd->mi[0]->sb_type == BLOCK_64X64 &&
+        vt->part_variances.none.variance < (thresholds[0] >> 1)) {
+      x->variance_low[0] = 1;
+    } else if (xd->mi[0]->sb_type == BLOCK_64X32) {
+      for (i = 0; i < 2; i++) {
+        if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2))
+          x->variance_low[i + 1] = 1;
+      }
+    } else if (xd->mi[0]->sb_type == BLOCK_32X64) {
+      for (i = 0; i < 2; i++) {
+        if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2))
+          x->variance_low[i + 3] = 1;
+      }
+    } else {
+      for (i = 0; i < 4; i++) {
+        if (!force_split[i + 1]) {
+          // 32x32
+          if (vt->split[i].part_variances.none.variance <
+              (thresholds[1] >> 1))
+            x->variance_low[i + 5] = 1;
+        } else if (cpi->sf.short_circuit_low_temp_var == 2) {
+          int idx[4] = {0, 4, xd->mi_stride << 2, (xd->mi_stride << 2) + 4};
+          const int idx_str = cm->mi_stride * mi_row + mi_col + idx[i];
+          MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;
+          // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
+          // inside.
+          if ((*this_mi)->sb_type == BLOCK_16X16 ||
+              (*this_mi)->sb_type == BLOCK_32X16 ||
+              (*this_mi)->sb_type == BLOCK_16X32) {
+            for (j = 0; j < 4; j++) {
+              if (vt->split[i].split[j].part_variances.none.variance <
+                  (thresholds[2] >> 8))
+                x->variance_low[(i << 2) + j + 9] = 1;
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 // This function chooses partitioning based on the variance between source and
 // reconstructed last, where variance is computed for down-sampled inputs.
 static int choose_partitioning(VP9_COMP *cpi,
@@ -1084,56 +1143,8 @@
   }
 
   if (cpi->sf.short_circuit_low_temp_var) {
-    const int mv_thr = cm->width > 640 ? 8 : 4;
-    // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected
-    // and int_pro mv is small. If the temporal variance is small set the
-    // variance_low flag for the block. The variance threshold can be adjusted,
-    // the higher the more aggressive.
-    if (ref_frame_partition == LAST_FRAME &&
-        (cpi->sf.short_circuit_low_temp_var == 1 ||
-         (xd->mi[0]->mv[0].as_mv.col < mv_thr &&
-          xd->mi[0]->mv[0].as_mv.col > -mv_thr &&
-          xd->mi[0]->mv[0].as_mv.row < mv_thr &&
-          xd->mi[0]->mv[0].as_mv.row > -mv_thr))) {
-      if (xd->mi[0]->sb_type == BLOCK_64X64 &&
-          vt.part_variances.none.variance < (thresholds[0] >> 1)) {
-        x->variance_low[0] = 1;
-      } else if (xd->mi[0]->sb_type == BLOCK_64X32) {
-        for (j = 0; j < 2; j++) {
-          if (vt.part_variances.horz[j].variance < (thresholds[0] >> 2))
-            x->variance_low[j + 1] = 1;
-        }
-      } else if (xd->mi[0]->sb_type == BLOCK_32X64) {
-        for (j = 0; j < 2; j++) {
-          if (vt.part_variances.vert[j].variance < (thresholds[0] >> 2))
-            x->variance_low[j + 3] = 1;
-        }
-      } else {
-        for (i = 0; i < 4; i++) {
-          if (!force_split[i + 1]) {
-            // 32x32
-            if (vt.split[i].part_variances.none.variance <
-                (thresholds[1] >> 1))
-              x->variance_low[i + 5] = 1;
-          } else if (cpi->sf.short_circuit_low_temp_var == 2) {
-            int idx[4] = {0, 4, xd->mi_stride << 2, (xd->mi_stride << 2) + 4};
-            const int idx_str = cm->mi_stride * mi_row + mi_col + idx[i];
-            MODE_INFO **this_mi = cm->mi_grid_visible + idx_str;
-            // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block
-            // inside.
-            if ((*this_mi)->sb_type == BLOCK_16X16 ||
-                (*this_mi)->sb_type == BLOCK_32X16 ||
-                (*this_mi)->sb_type == BLOCK_16X32) {
-              for (j = 0; j < 4; j++) {
-                if (vt.split[i].split[j].part_variances.none.variance <
-                    (thresholds[2] >> 8))
-                  x->variance_low[(i << 2) + j + 9] = 1;
-              }
-            }
-          }
-        }
-      }
-    }
+    set_low_temp_var_flag(cpi, x, xd, &vt, force_split, thresholds,
+                          ref_frame_partition, mi_col, mi_row);
   }
   return 0;
 }
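
Note on the indexing: the variance_low[] offsets used in set_low_temp_var_flag()
above map one 64x64 superblock onto 25 slots (indices 0..24): one for the 64x64
block, two each for the 64x32 and 32x64 halves, four for the 32x32 quarters and
sixteen for the 16x16 blocks. Below is a minimal sketch of that layout, assuming
only the libvpx BLOCK_SIZE enum; the helper name map_variance_low_index is
illustrative and not part of this patch or of libvpx.

/* Illustrative sketch only: mirrors the offsets used in
 * set_low_temp_var_flag(). i is the sub-block index within the 64x64
 * (0..1 for the halves, 0..3 for the quarters), j is the 16x16 index
 * within a 32x32 quarter; both are ignored where not needed. */
static int map_variance_low_index(BLOCK_SIZE bsize, int i, int j) {
  switch (bsize) {
    case BLOCK_64X64: return 0;                 /* slot 0 */
    case BLOCK_64X32: return i + 1;             /* slots 1..2 */
    case BLOCK_32X64: return i + 3;             /* slots 3..4 */
    case BLOCK_32X32: return i + 5;             /* slots 5..8 */
    case BLOCK_16X16: return (i << 2) + j + 9;  /* slots 9..24 */
    default: return -1;
  }
}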