ref: 12cb30d4bd0e08d9bcca632e21f13c7a20d9ce17
parent: f85f79f6305d8ea4ef23fc91e4ce988af6a9eec6
parent: cae03a7ef57594c6d4762a83ff63eaa2a44c8ecd
author: Yunqing Wang <[email protected]>
date: Thu Apr 2 14:22:08 EDT 2015
Merge "Set vbp thresholds for aq3 boosted blocks"
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -474,7 +474,7 @@
}
-void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
+void vp9_set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
SPEED_FEATURES *const sf = &cpi->sf;
if (sf->partition_search_type != VAR_BASED_PARTITION &&
sf->partition_search_type != REFERENCE_PARTITION) {
@@ -491,20 +491,23 @@
// If 4x4 partition is not used, then 8x8 partition will be selected
// if variance of 16x16 block is very high, so use larger threshold
// for 16x16 (threshold_bsize_min) in that case.
+
+ // Array index: 0 - threshold_64x64; 1 - threshold_32x32;
+  // 2 - threshold_16x16; 3 - threshold_8x8;
if (is_key_frame) {
- cpi->vbp_threshold_64x64 = threshold_base;
- cpi->vbp_threshold_32x32 = threshold_base >> 2;
- cpi->vbp_threshold_16x16 = threshold_base >> 2;
- cpi->vbp_threshold_8x8 = threshold_base << 2;
+ thresholds[0] = threshold_base;
+ thresholds[1] = threshold_base >> 2;
+ thresholds[2] = threshold_base >> 2;
+ thresholds[3] = threshold_base << 2;
cpi->vbp_bsize_min = BLOCK_8X8;
} else {
- cpi->vbp_threshold_32x32 = threshold_base;
+ thresholds[1] = threshold_base;
if (cm->width <= 352 && cm->height <= 288) {
- cpi->vbp_threshold_64x64 = threshold_base >> 2;
- cpi->vbp_threshold_16x16 = threshold_base << 3;
+ thresholds[0] = threshold_base >> 2;
+ thresholds[2] = threshold_base << 3;
} else {
- cpi->vbp_threshold_64x64 = threshold_base;
- cpi->vbp_threshold_16x16 = threshold_base << cpi->oxcf.speed;
+ thresholds[0] = threshold_base;
+ thresholds[2] = threshold_base << cpi->oxcf.speed;
}
cpi->vbp_bsize_min = BLOCK_16X16;
}
@@ -606,6 +609,8 @@
int sp;
int dp;
int pixels_wide = 64, pixels_high = 64;
+ int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
+ cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
// Always use 4x4 partition for key frame.
const int is_key_frame = (cm->frame_type == KEY_FRAME);
@@ -618,6 +623,11 @@
const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
cm->last_frame_seg_map;
segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
+
+ if (cyclic_refresh_segment_id_boosted(segment_id)) {
+ int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+ vp9_set_vbp_thresholds(cpi, thresholds, q);
+ }
}
set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
@@ -741,7 +751,7 @@
}
if (is_key_frame || (low_res &&
vt.split[i].split[j].part_variances.none.variance >
- (cpi->vbp_threshold_32x32 << 1))) {
+ (thresholds[1] << 1))) {
// Go down to 4x4 down-sampling for variance.
variance4x4downsample[i2 + j] = 1;
for (k = 0; k < 4; k++) {
@@ -761,11 +771,6 @@
}
}
- // No 64x64 blocks on segments other than base (un-boosted) segment,
- // so force split.
- if (cyclic_refresh_segment_id_boosted(segment_id))
- force_split[0] = 1;
-
// Fill the rest of the variance tree by summing split partition values.
for (i = 0; i < 4; i++) {
const int i2 = i << 2;
@@ -782,7 +787,7 @@
// If variance of this 32x32 block is above the threshold, force the block
// to split. This also forces a split on the upper (64x64) level.
get_variance(&vt.split[i].part_variances.none);
- if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold_32x32) {
+ if (vt.split[i].part_variances.none.variance > thresholds[1]) {
force_split[i + 1] = 1;
force_split[0] = 1;
}
@@ -794,8 +799,7 @@
// we get to one that's got a variance lower than our threshold.
if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
- cpi->vbp_threshold_64x64, BLOCK_16X16,
- force_split[0])) {
+ thresholds[0], BLOCK_16X16, force_split[0])) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
@@ -802,8 +806,8 @@
const int i2 = i << 2;
if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx),
- cpi->vbp_threshold_32x32,
- BLOCK_16X16, force_split[i + 1])) {
+ thresholds[1], BLOCK_16X16,
+ force_split[i + 1])) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
@@ -816,8 +820,7 @@
if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
mi_row + y32_idx + y16_idx,
mi_col + x32_idx + x16_idx,
- cpi->vbp_threshold_16x16,
- cpi->vbp_bsize_min, 0)) {
+ thresholds[2], cpi->vbp_bsize_min, 0)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
@@ -826,8 +829,7 @@
BLOCK_8X8,
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
- cpi->vbp_threshold_8x8,
- BLOCK_8X8, 0)) {
+ thresholds[3], BLOCK_8X8, 0)) {
set_block_size(cpi, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
--- a/vp9/encoder/vp9_encodeframe.h
+++ b/vp9/encoder/vp9_encodeframe.h
@@ -12,6 +12,8 @@
#ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_
#define VP9_ENCODER_VP9_ENCODEFRAME_H_
+#include "vpx/vpx_integer.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -38,7 +40,7 @@
void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td,
int tile_row, int tile_col);
-void vp9_set_vbp_thresholds(struct VP9_COMP *cpi, int q);
+void vp9_set_vbp_thresholds(struct VP9_COMP *cpi, int64_t thresholds[], int q);
#ifdef __cplusplus
} // extern "C"
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -2958,7 +2958,7 @@
set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
vp9_set_quantizer(cm, q);
- vp9_set_vbp_thresholds(cpi, q);
+ vp9_set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
setup_frame(cpi);
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -460,10 +460,9 @@
int resize_pending;
// VAR_BASED_PARTITION thresholds
- int64_t vbp_threshold_64x64;
- int64_t vbp_threshold_32x32;
- int64_t vbp_threshold_16x16;
- int64_t vbp_threshold_8x8;
+ // 0 - threshold_64x64; 1 - threshold_32x32;
+  // 2 - threshold_16x16; 3 - threshold_8x8;
+ int64_t vbp_thresholds[4];
BLOCK_SIZE vbp_bsize_min;
// Multi-threading
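As a reading aid for the new vbp_thresholds[4] field, a hypothetical lookup (not part of the patch) that maps the old per-size fields onto the documented array slots; the enum and helper names are made up for the sketch.

    #include <stdint.h>

    /* Same ordering as the comment above: 0 - 64x64, 1 - 32x32,
     * 2 - 16x16, 3 - 8x8. */
    enum {
      VBP_THR_64X64 = 0,  /* was vbp_threshold_64x64 */
      VBP_THR_32X32 = 1,  /* was vbp_threshold_32x32 */
      VBP_THR_16X16 = 2,  /* was vbp_threshold_16x16 */
      VBP_THR_8X8   = 3   /* was vbp_threshold_8x8   */
    };

    static int64_t vbp_threshold_sketch(const int64_t thresholds[4], int slot) {
      return thresholds[slot];  /* e.g. thresholds[VBP_THR_32X32] */
    }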