ref: 356c944782fc5232735f3e0d6a8f2c528ab91b04
parent: 9a5e0d40388c205f33ab6db344eebe6526dd7232
parent: c7f58bcc70f0d8fb7ff48c115f53f78165d26792
author: Johann Koenig <[email protected]>
date: Tue Sep 1 22:46:54 EDT 2015
Merge changes I2e0cc72a,I63a7da78,I4aee2600,I71113505,I3dce6c70, ...

* changes:
  Only build multithreaded functions on mt builds.
  Don't build calc_psnr for high bit depth.
  Enable missing dual lpf test
  Remove unused VP10 functions.
  Mark VP10 functions as 'INLINE'
  Remove unused functions from test files
  Only build append_negative_gtest_filter when it is used.
  Add INLINE decoration to static test functions
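The patch below repeatedly applies two patterns, so a minimal standalone sketch may help when reading the hunks: a static helper defined in a header gets the INLINE decoration so that translation units which include the header but never call the helper do not trigger -Wunused-function, and a helper that is only reachable on multithreaded builds is fenced with CONFIG_MULTITHREAD so it vanishes when that option is compiled out. CONFIG_MULTITHREAD and INLINE are real libvpx configuration symbols; the helper names in this sketch are made up for illustration and are not part of the patch.

/* Illustrative sketch only -- not from the patch. Shows the INLINE and
 * #if CONFIG_MULTITHREAD patterns this merge applies. Helper names are
 * hypothetical. */
#ifndef INLINE
#define INLINE inline /* libvpx normally provides INLINE via vpx_config.h */
#endif

/* Safe to define in a widely included header: translation units that never
 * call it do not emit unused-function warnings. */
static INLINE int example_double(int v) { return v * 2; }

#if CONFIG_MULTITHREAD
/* Mirrors pack_mb_row_tokens()/sum_coef_counts(): only built when the
 * multithreaded code that calls it is built. */
static int example_mt_sum(int a, int b) { return a + b; }
#endif /* CONFIG_MULTITHREAD */

int main(void) {
  int r = example_double(21);
#if CONFIG_MULTITHREAD
  r = example_mt_sum(r, 0);
#endif
  return (r == 42) ? 0 : 1;
}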
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -40,30 +40,6 @@
#endif
const int kNumCoeffs = 256;
-const double PI = 3.1415926535898;
-void reference2_16x16_idct_2d(double *input, double *output) {
- double x;
- for (int l = 0; l < 16; ++l) {
- for (int k = 0; k < 16; ++k) {
- double s = 0;
- for (int i = 0; i < 16; ++i) {
- for (int j = 0; j < 16; ++j) {
- x = cos(PI * j * (l + 0.5) / 16.0) *
- cos(PI * i * (k + 0.5) / 16.0) *
- input[i * 16 + j] / 256;
- if (i != 0)
- x *= sqrt(2.0);
- if (j != 0)
- x *= sqrt(2.0);
- s += x;
- }
- }
- output[k*16+l] = s;
- }
- }
-}
-
-
const double C1 = 0.995184726672197;
const double C2 = 0.98078528040323;
const double C3 = 0.956940335732209;
--- a/test/idct8x8_test.cc
+++ b/test/idct8x8_test.cc
@@ -67,43 +67,6 @@
output[i] *= 2;
}
-void reference_idct_1d(double input[8], double output[8]) {
- const double kPi = 3.141592653589793238462643383279502884;
- const double kSqrt2 = 1.414213562373095048801688724209698;
- for (int k = 0; k < 8; k++) {
- output[k] = 0.0;
- for (int n = 0; n < 8; n++) {
- output[k] += input[n]*cos(kPi*(2*k+1)*n/16.0);
- if (n == 0)
- output[k] = output[k]/kSqrt2;
- }
- }
-}
-
-void reference_idct_2d(double input[64], int16_t output[64]) {
- double out[64], out2[64];
- // First transform rows
- for (int i = 0; i < 8; ++i) {
- double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = input[j + i*8];
- reference_idct_1d(temp_in, temp_out);
- for (int j = 0; j < 8; ++j)
- out[j + i*8] = temp_out[j];
- }
- // Then transform columns
- for (int i = 0; i < 8; ++i) {
- double temp_in[8], temp_out[8];
- for (int j = 0; j < 8; ++j)
- temp_in[j] = out[j*8 + i];
- reference_idct_1d(temp_in, temp_out);
- for (int j = 0; j < 8; ++j)
- out2[j*8 + i] = temp_out[j];
- }
- for (int i = 0; i < 64; ++i)
- output[i] = round(out2[i]/32);
-}
-
TEST(VP9Idct8x8Test, AccuracyCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 10000;
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -590,7 +590,9 @@
make_tuple(&vpx_lpf_horizontal_16_sse2, &vpx_lpf_horizontal_16_c, 8, 1),
make_tuple(&vpx_lpf_horizontal_16_sse2, &vpx_lpf_horizontal_16_c, 8, 2),
make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8, 1),
- make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1)));
+ make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1),
+ make_tuple(&wrapper_vertical_16_dual_sse2,
+ &wrapper_vertical_16_dual_c, 8, 1)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
--- a/test/test_libvpx.cc
+++ b/test/test_libvpx.cc
@@ -26,6 +26,7 @@
extern void vpx_scale_rtcd();
}
+#if ARCH_X86 || ARCH_X86_64
static void append_negative_gtest_filter(const char *str) {
std::string filter = ::testing::FLAGS_gtest_filter;
// Negative patterns begin with one '-' followed by a ':' separated list.
@@ -33,6 +34,7 @@
filter += str;
::testing::FLAGS_gtest_filter = filter;
}
+#endif // ARCH_X86 || ARCH_X86_64
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
@@ -55,7 +57,7 @@
append_negative_gtest_filter(":AVX.*:AVX/*");
if (!(simd_caps & HAS_AVX2))
append_negative_gtest_filter(":AVX2.*:AVX2/*");
-#endif
+#endif // ARCH_X86 || ARCH_X86_64
#if !CONFIG_SHARED
// Shared library builds don't support whitebox tests
--- a/test/util.h
+++ b/test/util.h
@@ -19,8 +19,8 @@
// Macros
#define GET_PARAM(k) std::tr1::get< k >(GetParam())
-static double compute_psnr(const vpx_image_t *img1,
-                           const vpx_image_t *img2) {
+static INLINE double compute_psnr(const vpx_image_t *img1,
+                                  const vpx_image_t *img2) {
assert((img1->fmt == img2->fmt) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h));
--- a/test/video_source.h
+++ b/test/video_source.h
@@ -48,7 +48,7 @@
#undef TO_STRING
#undef STRINGIFY
-static FILE *OpenTestDataFile(const std::string& file_name) {
+static INLINE FILE *OpenTestDataFile(const std::string& file_name) {
const std::string path_to_source = GetDataPath() + "/" + file_name;
return fopen(path_to_source.c_str(), "rb");
}
--- a/vp10/common/reconinter.h
+++ b/vp10/common/reconinter.h
@@ -34,14 +34,14 @@
}
#if CONFIG_VP9_HIGHBITDEPTH
-static void high_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const int subpel_x,
- const int subpel_y,
- const struct scale_factors *sf,
- int w, int h, int ref,
- const InterpKernel *kernel,
- int xs, int ys, int bd) {
+static INLINE void high_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys, int bd) {
sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref](
src, src_stride, dst, dst_stride,
kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
@@ -77,8 +77,9 @@
}
// TODO(jkoleszar): yet another mv clamping function :-(
-static MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
- int bw, int bh, int ss_x, int ss_y) {
+static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd,
+ const MV *src_mv,
+ int bw, int bh, int ss_x, int ss_y) {
// If the MV points so far into the UMV border that no visible pixels
// are used for reconstruction, the subpel part of the MV can be
// discarded and the MV limited to 16 pixels with equivalent results.
@@ -102,8 +103,8 @@
return clamped_mv;
}
-static MV average_split_mvs(const struct macroblockd_plane *pd,
- const MODE_INFO *mi, int ref, int block) {
+static INLINE MV average_split_mvs(const struct macroblockd_plane *pd,
+ const MODE_INFO *mi, int ref, int block) {
const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
MV res = {0, 0};
switch (ss_idx) {
--- a/vp10/encoder/encodeframe.c
+++ b/vp10/encoder/encodeframe.c
@@ -238,19 +238,6 @@
xd->tile = *tile;
}
-static void duplicate_mode_info_in_sb(VP10_COMMON *cm, MACROBLOCKD *xd,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
- const int block_width = num_8x8_blocks_wide_lookup[bsize];
- const int block_height = num_8x8_blocks_high_lookup[bsize];
- int i, j;
- for (j = 0; j < block_height; ++j)
- for (i = 0; i < block_width; ++i) {
- if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
- xd->mi[j * xd->mi_stride + i] = xd->mi[0];
- }
-}
-
static void set_block_size(VP10_COMP * const cpi,
MACROBLOCK *const x,
MACROBLOCKD *const xd,
@@ -1106,36 +1093,6 @@
x->e_mbd.plane[i].subsampling_y);
}
-static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
- RD_COST *rd_cost, BLOCK_SIZE bsize) {
- MACROBLOCKD *const xd = &x->e_mbd;
- MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- INTERP_FILTER filter_ref;
-
- if (xd->up_available)
- filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
- else if (xd->left_available)
- filter_ref = xd->mi[-1]->mbmi.interp_filter;
- else
- filter_ref = EIGHTTAP;
-
- mbmi->sb_type = bsize;
- mbmi->mode = ZEROMV;
- mbmi->tx_size = VPXMIN(max_txsize_lookup[bsize],
- tx_mode_to_biggest_tx_size[tx_mode]);
- mbmi->skip = 1;
- mbmi->uv_mode = DC_PRED;
- mbmi->ref_frame[0] = LAST_FRAME;
- mbmi->ref_frame[1] = NONE;
- mbmi->mv[0].as_int = 0;
- mbmi->interp_filter = filter_ref;
-
- xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
- x->skip = 1;
-
- vp10_rd_cost_init(rd_cost);
-}
-
static int set_segment_rdmult(VP10_COMP *const cpi,
MACROBLOCK *const x,
int8_t segment_id) {
@@ -2626,83 +2583,6 @@
return TX_MODE_SELECT;
else
return cpi->common.tx_mode;
-}
-
-static void fill_mode_info_sb(VP10_COMMON *cm, MACROBLOCK *x,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize,
- PC_TREE *pc_tree) {
- MACROBLOCKD *xd = &x->e_mbd;
- int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
- PARTITION_TYPE partition = pc_tree->partitioning;
- BLOCK_SIZE subsize = get_subsize(bsize, partition);
-
- assert(bsize >= BLOCK_8X8);
-
- if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
- return;
-
- switch (partition) {
- case PARTITION_NONE:
- set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
- *(xd->mi[0]) = pc_tree->none.mic;
- *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
- duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
- break;
- case PARTITION_VERT:
- set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
- *(xd->mi[0]) = pc_tree->vertical[0].mic;
- *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
- duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
-
- if (mi_col + hbs < cm->mi_cols) {
- set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
- *(xd->mi[0]) = pc_tree->vertical[1].mic;
- *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
- duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
- }
- break;
- case PARTITION_HORZ:
- set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
- *(xd->mi[0]) = pc_tree->horizontal[0].mic;
- *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
- duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
- if (mi_row + hbs < cm->mi_rows) {
- set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
- *(xd->mi[0]) = pc_tree->horizontal[1].mic;
- *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
- duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
- }
- break;
- case PARTITION_SPLIT: {
- fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
- fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
- pc_tree->split[1]);
- fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
- pc_tree->split[2]);
- fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
- pc_tree->split[3]);
- break;
- }
- default:
- break;
- }
-}
-
-// Reset the prediction pixel ready flag recursively.
-static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
- pc_tree->none.pred_pixel_ready = 0;
- pc_tree->horizontal[0].pred_pixel_ready = 0;
- pc_tree->horizontal[1].pred_pixel_ready = 0;
- pc_tree->vertical[0].pred_pixel_ready = 0;
- pc_tree->vertical[1].pred_pixel_ready = 0;
-
- if (bsize > BLOCK_8X8) {
- BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
- int i;
- for (i = 0; i < 4; ++i)
- pred_pixel_ready_reset(pc_tree->split[i], subsize);
- }
}
static int get_skip_encode_frame(const VP10_COMMON *cm, ThreadData *const td) {
--- a/vp10/encoder/encoder.c
+++ b/vp10/encoder/encoder.c
@@ -2145,42 +2145,6 @@
uint32_t samples[4]; // total/y/u/v
} PSNR_STATS;
-static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
- PSNR_STATS *psnr) {
- static const double peak = 255.0;
- const int widths[3] = {
- a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
- const int heights[3] = {
- a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
- const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
- const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
- const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
- const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
- int i;
- uint64_t total_sse = 0;
- uint32_t total_samples = 0;
-
- for (i = 0; i < 3; ++i) {
- const int w = widths[i];
- const int h = heights[i];
- const uint32_t samples = w * h;
- const uint64_t sse = get_sse(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i],
- w, h);
- psnr->sse[1 + i] = sse;
- psnr->samples[1 + i] = samples;
- psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
-
- total_sse += sse;
- total_samples += samples;
- }
-
- psnr->sse[0] = total_sse;
- psnr->samples[0] = total_samples;
- psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
- (double)total_sse);
-}
-
#if CONFIG_VP9_HIGHBITDEPTH
static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b,
@@ -2220,6 +2184,44 @@
b_planes[i], b_strides[i],
w, h);
}
+ psnr->sse[1 + i] = sse;
+ psnr->samples[1 + i] = samples;
+ psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+
+ total_sse += sse;
+ total_samples += samples;
+ }
+
+ psnr->sse[0] = total_sse;
+ psnr->samples[0] = total_samples;
+ psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
+ (double)total_sse);
+}
+
+#else // !CONFIG_VP9_HIGHBITDEPTH
+
+static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+ PSNR_STATS *psnr) {
+ static const double peak = 255.0;
+ const int widths[3] = {
+ a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
+ const int heights[3] = {
+ a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
+ const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
+ const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
+ const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
+ const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
+ int i;
+ uint64_t total_sse = 0;
+ uint32_t total_samples = 0;
+
+ for (i = 0; i < 3; ++i) {
+ const int w = widths[i];
+ const int h = heights[i];
+ const uint32_t samples = w * h;
+ const uint64_t sse = get_sse(a_planes[i], a_strides[i],
+ b_planes[i], b_strides[i],
+ w, h);
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
--- a/vp10/encoder/ethread.c
+++ b/vp10/encoder/ethread.c
@@ -52,18 +52,6 @@
return 0;
}
-static int get_max_tile_cols(VP10_COMP *cpi) {
- const int aligned_width = ALIGN_POWER_OF_TWO(cpi->oxcf.width, MI_SIZE_LOG2);
- int mi_cols = aligned_width >> MI_SIZE_LOG2;
- int min_log2_tile_cols, max_log2_tile_cols;
- int log2_tile_cols;
-
- vp10_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
- log2_tile_cols = clamp(cpi->oxcf.tile_columns,
- min_log2_tile_cols, max_log2_tile_cols);
- return (1 << log2_tile_cols);
-}
-
void vp10_encode_tiles_mt(VP10_COMP *cpi) {
VP10_COMMON *const cm = &cpi->common;
const int tile_cols = 1 << cm->log2_tile_cols;
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -407,6 +407,7 @@
}
+#if CONFIG_MULTITHREAD
static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w)
{
int mb_row;
@@ -421,6 +422,7 @@
}
}
+#endif // CONFIG_MULTITHREAD
static void write_mv_ref
(
@@ -1675,7 +1677,7 @@
if (cpi->b_multi_threaded)
pack_mb_row_tokens(cpi, &cpi->bc[1]);
else
-#endif
+#endif // CONFIG_MULTITHREAD
vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);
vp8_stop_encode(&cpi->bc[1]);
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -700,6 +700,7 @@
vp8_zero(x->count_mb_ref_frame_usage);
}
+#if CONFIG_MULTITHREAD
static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
{
int i = 0;
@@ -729,6 +730,7 @@
}
while (++i < BLOCK_TYPES);
}
+#endif // CONFIG_MULTITHREAD
void vp8_encode_frame(VP8_COMP *cpi)
{
@@ -927,7 +929,7 @@
}
else
-#endif
+#endif // CONFIG_MULTITHREAD
{
/* for each macroblock row in image */
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -2248,42 +2248,6 @@
uint32_t samples[4]; // total/y/u/v
} PSNR_STATS;
-static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
- PSNR_STATS *psnr) {
- static const double peak = 255.0;
- const int widths[3] = {
- a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
- const int heights[3] = {
- a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
- const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
- const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
- const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
- const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
- int i;
- uint64_t total_sse = 0;
- uint32_t total_samples = 0;
-
- for (i = 0; i < 3; ++i) {
- const int w = widths[i];
- const int h = heights[i];
- const uint32_t samples = w * h;
- const uint64_t sse = get_sse(a_planes[i], a_strides[i],
- b_planes[i], b_strides[i],
- w, h);
- psnr->sse[1 + i] = sse;
- psnr->samples[1 + i] = samples;
- psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
-
- total_sse += sse;
- total_samples += samples;
- }
-
- psnr->sse[0] = total_sse;
- psnr->samples[0] = total_samples;
- psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
- (double)total_sse);
-}
-
#if CONFIG_VP9_HIGHBITDEPTH
static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
const YV12_BUFFER_CONFIG *b,
@@ -2323,6 +2287,44 @@
b_planes[i], b_strides[i],
w, h);
}
+ psnr->sse[1 + i] = sse;
+ psnr->samples[1 + i] = samples;
+ psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);
+
+ total_sse += sse;
+ total_samples += samples;
+ }
+
+ psnr->sse[0] = total_sse;
+ psnr->samples[0] = total_samples;
+ psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak,
+ (double)total_sse);
+}
+
+#else // !CONFIG_VP9_HIGHBITDEPTH
+
+static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
+ PSNR_STATS *psnr) {
+ static const double peak = 255.0;
+ const int widths[3] = {
+ a->y_crop_width, a->uv_crop_width, a->uv_crop_width};
+ const int heights[3] = {
+ a->y_crop_height, a->uv_crop_height, a->uv_crop_height};
+ const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer};
+ const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride};
+ const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer};
+ const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride};
+ int i;
+ uint64_t total_sse = 0;
+ uint32_t total_samples = 0;
+
+ for (i = 0; i < 3; ++i) {
+ const int w = widths[i];
+ const int h = heights[i];
+ const uint32_t samples = w * h;
+ const uint64_t sse = get_sse(a_planes[i], a_strides[i],
+ b_planes[i], b_strides[i],
+ w, h);
psnr->sse[1 + i] = sse;
psnr->samples[1 + i] = samples;
psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse);