ref: b3933e2d3ccf0de83441eac9d22a145c20ac6803
parent: b8ec5dcdf8f5bc40a137b988437ef5ff3931b03a
parent: 87bf1a149c2e6f89ed0a6fd804a1f4dc54e346be
author: Yaowu Xu <[email protected]>
date: Tue Jun 21 20:12:58 EDT 2016
Merge "Fix ubsan warnings: vp9/encoder/vp9_mcomp.c"
--- a/vp9/encoder/vp9_mbgraph.c
+++ b/vp9/encoder/vp9_mbgraph.c
@@ -59,8 +59,8 @@
// Try sub-pixel MC
// if (bestsme > error_thresh && bestsme < INT_MAX)
{
- int distortion;
- unsigned int sse;
+ uint32_t distortion;
+ uint32_t sse;
cpi->find_fractional_mv_step(
x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
--- a/vp9/encoder/vp9_mcomp.c
+++ b/vp9/encoder/vp9_mcomp.c
@@ -162,9 +162,36 @@
return &buf[(r >> 3) * stride + (c >> 3)];
}
+#if CONFIG_VP9_HIGHBITDEPTH
/* checks if (r, c) has better score than previous best */
#define CHECK_BETTER(v, r, c) \
if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+ int64_t tmpmse; \
+ if (second_pred == NULL) { \
+ thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), \
+ sp(r), z, src_stride, &sse); \
+ } else { \
+ thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), \
+ sp(r), z, src_stride, &sse, second_pred); \
+ } \
+ tmpmse = thismse; \
+ tmpmse += MVC(r, c); \
+ if (tmpmse >= INT_MAX) { \
+ v = INT_MAX; \
+ } else if ((v = (uint32_t)tmpmse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ } else { \
+ v = INT_MAX; \
+ }
+#else
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
if (second_pred == NULL) \
thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \
src_stride, &sse); \
@@ -182,6 +209,7 @@
v = INT_MAX; \
}
+#endif
#define FIRST_LEVEL_CHECKS \
{ \
unsigned int left, right, up, down, diag; \
@@ -310,10 +338,10 @@
const uint8_t *second_pred,
int w, int h, int offset,
int *mvjcost, int *mvcost[2],
- unsigned int *sse1,
- int *distortion) {
- unsigned int besterr;
+ uint32_t *sse1,
+ uint32_t *distortion) {
#if CONFIG_VP9_HIGHBITDEPTH
+ uint64_t besterr;
if (second_pred != NULL) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
@@ -329,9 +357,13 @@
} else {
besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
}
- *distortion = besterr;
+ *distortion = (uint32_t)besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+ if (besterr >= UINT32_MAX)
+ return UINT32_MAX;
+ return (uint32_t)besterr;
#else
+ uint32_t besterr;
(void) xd;
if (second_pred != NULL) {
DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
@@ -342,8 +374,8 @@
}
*distortion = besterr;
besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif // CONFIG_VP9_HIGHBITDEPTH
return besterr;
+#endif // CONFIG_VP9_HIGHBITDEPTH
}
static INLINE int divide_and_round(const int n, const int d) {
@@ -373,7 +405,7 @@
(cost_list[4] - 2 * cost_list[0] + cost_list[2]));
}
-int vp9_skip_sub_pixel_tree(
+uint32_t vp9_skip_sub_pixel_tree(
const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
int allow_hp,
@@ -383,8 +415,8 @@
int iters_per_step,
int *cost_list,
int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
+ uint32_t *distortion,
+ uint32_t *sse1,
const uint8_t *second_pred,
int w, int h) {
SETUP_SUBPEL_SEARCH;
@@ -418,7 +450,7 @@
return besterr;
}
-int vp9_find_best_sub_pixel_tree_pruned_evenmore(
+uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
int allow_hp,
@@ -428,8 +460,8 @@
int iters_per_step,
int *cost_list,
int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
+ uint32_t *distortion,
+ uint32_t *sse1,
const uint8_t *second_pred,
int w, int h) {
SETUP_SUBPEL_SEARCH;
@@ -498,7 +530,7 @@
return besterr;
}
-int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
+uint32_t vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
int allow_hp,
int error_per_bit,
@@ -507,8 +539,8 @@
int iters_per_step,
int *cost_list,
int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
+ uint32_t *distortion,
+ uint32_t *sse1,
const uint8_t *second_pred,
int w, int h) {
SETUP_SUBPEL_SEARCH;
@@ -572,7 +604,7 @@
return besterr;
}
-int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
+uint32_t vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
int allow_hp,
int error_per_bit,
@@ -581,8 +613,8 @@
int iters_per_step,
int *cost_list,
int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
+ uint32_t *distortion,
+ uint32_t *sse1,
const uint8_t *second_pred,
int w, int h) {
SETUP_SUBPEL_SEARCH;
@@ -674,19 +706,19 @@
{0, -1}, {0, 1}, {-1, 0}, {1, 0}
};
-int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
- MV *bestmv, const MV *ref_mv,
- int allow_hp,
- int error_per_bit,
- const vp9_variance_fn_ptr_t *vfp,
- int forced_stop,
- int iters_per_step,
- int *cost_list,
- int *mvjcost, int *mvcost[2],
- int *distortion,
- unsigned int *sse1,
- const uint8_t *second_pred,
- int w, int h) {
+uint32_t vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
+ MV *bestmv, const MV *ref_mv,
+ int allow_hp,
+ int error_per_bit,
+ const vp9_variance_fn_ptr_t *vfp,
+ int forced_stop,
+ int iters_per_step,
+ int *cost_list,
+ int *mvjcost, int *mvcost[2],
+ uint32_t *distortion,
+ uint32_t *sse1,
+ const uint8_t *second_pred,
+ int w, int h) {
const uint8_t *const z = x->plane[0].src.buf;
const uint8_t *const src_address = z;
const int src_stride = x->plane[0].src.stride;
@@ -1381,12 +1413,22 @@
const struct buf_2d *const what = &x->plane[0].src;
const struct buf_2d *const in_what = &xd->plane[0].pre[0];
const MV mv = {best_mv->row * 8, best_mv->col * 8};
- unsigned int unused;
-
+ uint32_t unused;
+#if CONFIG_VP9_HIGHBITDEPTH
+  uint64_t err = vfp->vf(what->buf, what->stride,
+ get_buf_from_mv(in_what, best_mv),
+ in_what->stride, &unused);
+ err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
+ x->mvcost, x->errorperbit) : 0);
+ if (err >= INT_MAX)
+ return INT_MAX;
+ return (int)err;
+#else
return vfp->vf(what->buf, what->stride,
get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) +
(use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
x->mvcost, x->errorperbit) : 0);
+#endif
}
int vp9_get_mvpred_av_var(const MACROBLOCK *x,
--- a/vp9/encoder/vp9_mcomp.h
+++ b/vp9/encoder/vp9_mcomp.h
@@ -74,7 +74,7 @@
BLOCK_SIZE bsize,
int mi_row, int mi_col);
-typedef int (fractional_mv_step_fp) (
+typedef uint32_t (fractional_mv_step_fp) (
const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
int allow_hp,
@@ -84,7 +84,7 @@
int iters_per_step,
int *cost_list,
int *mvjcost, int *mvcost[2],
- int *distortion, unsigned int *sse1,
+ uint32_t *distortion, uint32_t *sse1,
const uint8_t *second_pred,
int w, int h);
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -157,7 +157,7 @@
const int ref = mi->ref_frame[0];
const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
MV center_mv;
- int dis;
+ uint32_t dis;
int rate_mode;
const int tmp_col_min = x->mv_col_min;
const int tmp_col_max = x->mv_col_max;
@@ -1564,7 +1564,8 @@
!cpi->use_svc &&
cpi->oxcf.rc_mode == VPX_CBR) {
int tmp_sad;
- int dis, cost_list[5];
+ uint32_t dis;
+ int cost_list[5];
if (bsize < BLOCK_16X16)
continue;
@@ -2175,7 +2176,7 @@
const int tmp_col_max = x->mv_col_max;
const int tmp_row_min = x->mv_row_min;
const int tmp_row_max = x->mv_row_max;
- int dummy_dist;
+ uint32_t dummy_dist;
if (i == 0) {
mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1715,8 +1715,8 @@
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX) {
- int dis; /* TODO: use dis in distortion calculation later. */
- unsigned int sse;
+ uint32_t dis; /* TODO: use dis in distortion calculation later. */
+ uint32_t sse;
bestsme = cpi->find_fractional_mv_step(
x, &tmp_mv,
&ref_mv[id].as_mv,
@@ -1916,7 +1916,7 @@
INT_MAX, 1);
if (bestsme < INT_MAX) {
- int distortion;
+ uint32_t distortion;
cpi->find_fractional_mv_step(
x,
new_mv,
@@ -2346,7 +2346,7 @@
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX) {
- int dis; /* TODO: use dis in distortion calculation later. */
+ uint32_t dis; /* TODO: use dis in distortion calculation later. */
cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
cm->allow_high_precision_mv,
x->errorperbit,
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -264,8 +264,8 @@
int step_param;
int sadpb = x->sadperbit16;
int bestsme = INT_MAX;
- int distortion;
- unsigned int sse;
+ uint32_t distortion;
+ uint32_t sse;
int cost_list[5];
MV best_ref_mv1 = {0, 0};