ref: a6bc3dfb0f8e46017d7823985f8383a8dc6cc68b
parent: 009116cb6f7a75645c7c3092f97a5fa4cd42d15b
parent: e6446b4b60c7dd3cdeb6cfc3694f9a81df596764
author: Debargha Mukherjee <[email protected]>
date: Thu Sep 1 12:46:32 EDT 2016
Merge "Refactor uv tx size with lookup arrays"
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -228,20 +228,13 @@
void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
-static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
- int xss, int yss) {
- if (bsize < BLOCK_8X8) {
- return TX_4X4;
- } else {
- const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss];
- return VPXMIN(y_tx_size, max_txsize_lookup[plane_bsize]);
- }
-}
-
static INLINE TX_SIZE get_uv_tx_size(const MODE_INFO *mi,
const struct macroblockd_plane *pd) {
- return get_uv_tx_size_impl(mi->tx_size, mi->sb_type, pd->subsampling_x,
- pd->subsampling_y);
+ assert(mi->sb_type < BLOCK_8X8 ||
+ ss_size_lookup[mi->sb_type][pd->subsampling_x][pd->subsampling_y] !=
+ BLOCK_INVALID);
+ return uv_txsize_lookup[mi->sb_type][mi->tx_size][pd->subsampling_x]
+ [pd->subsampling_y];
}
static INLINE BLOCK_SIZE
--- a/vp9/common/vp9_common_data.c
+++ b/vp9/common/vp9_common_data.c
@@ -125,6 +125,102 @@
{ { BLOCK_64X64, BLOCK_64X32 }, { BLOCK_32X64, BLOCK_32X32 } },
};
+const TX_SIZE uv_txsize_lookup[BLOCK_SIZES][TX_SIZES][2][2] = {
+ // ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1
+ // ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1
+ {
+ // BLOCK_4X4
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ },
+ {
+ // BLOCK_4X8
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ },
+ {
+ // BLOCK_8X4
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ },
+ {
+ // BLOCK_8X8
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ },
+ {
+ // BLOCK_8X16
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
+ },
+ {
+ // BLOCK_16X8
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
+ { { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
+ },
+ {
+ // BLOCK_16X16
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ },
+ {
+ // BLOCK_16X32
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
+ },
+ {
+ // BLOCK_32X16
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
+ { { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
+ },
+ {
+ // BLOCK_32X32
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } },
+ { { TX_32X32, TX_16X16 }, { TX_16X16, TX_16X16 } },
+ },
+ {
+ // BLOCK_32X64
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } },
+ { { TX_32X32, TX_32X32 }, { TX_16X16, TX_16X16 } },
+ },
+ {
+ // BLOCK_64X32
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } },
+ { { TX_32X32, TX_16X16 }, { TX_32X32, TX_16X16 } },
+ },
+ {
+ // BLOCK_64X64
+ { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
+ { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } },
+ { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } },
+ { { TX_32X32, TX_32X32 }, { TX_32X32, TX_32X32 } },
+ },
+};
+
// Generates 4 bit field in which each bit set to 1 represents
// a blocksize partition 1111 means we split 64x64, 32x32, 16x16
// and 8x8. 1000 means we just split the 64x64 to 32x32
--- a/vp9/common/vp9_common_data.h
+++ b/vp9/common/vp9_common_data.h
@@ -33,6 +33,7 @@
extern const BLOCK_SIZE txsize_to_bsize[TX_SIZES];
extern const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES];
extern const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2];
+extern const TX_SIZE uv_txsize_lookup[BLOCK_SIZES][TX_SIZES][2][2];
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
extern const uint8_t need_top_left[INTRA_MODES];
#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
--- a/vp9/common/vp9_loopfilter.c
+++ b/vp9/common/vp9_loopfilter.c
@@ -655,7 +655,7 @@
const int shift_uv, LOOP_FILTER_MASK *lfm) {
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
- const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
+ const TX_SIZE tx_size_uv = uv_txsize_lookup[block_size][tx_size_y][1][1];
const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
@@ -1524,7 +1524,7 @@
const TX_SIZE tx_size_y = mi->tx_size;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
const int filter_level = get_filter_level(lfi_n, mi);
- const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
+ const TX_SIZE tx_size_uv = uv_txsize_lookup[block_size][tx_size_y][1][1];
LOOP_FILTER_MASK *const lfm = get_lfm(&cm->lf, mi_row, mi_col);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -712,13 +712,6 @@
}
}
-static INLINE TX_SIZE dec_get_uv_tx_size(const MODE_INFO *mi, int n4_wl,
- int n4_hl) {
- // get minimum log2 num4x4s dimension
- const int x = VPXMIN(n4_wl, n4_hl);
- return VPXMIN(mi->tx_size, x);
-}
-
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
int i;
for (i = 0; i < MAX_MB_PLANE; i++) {
@@ -799,8 +792,7 @@
int plane;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
- const TX_SIZE tx_size =
- plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) : mi->tx_size;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
const int num_4x4_w = pd->n4_w;
const int num_4x4_h = pd->n4_h;
const int step = (1 << tx_size);
@@ -833,8 +825,7 @@
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
- const TX_SIZE tx_size =
- plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) : mi->tx_size;
+ const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size;
const int num_4x4_w = pd->n4_w;
const int num_4x4_h = pd->n4_h;
const int step = (1 << tx_size);
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -2848,8 +2848,8 @@
return;
}
}
- max_uv_tx_size = get_uv_tx_size_impl(
- xd->mi[0]->tx_size, bsize, pd[1].subsampling_x, pd[1].subsampling_y);
+ max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size]
+ [pd[1].subsampling_x][pd[1].subsampling_y];
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
&uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
@@ -3344,8 +3344,8 @@
best_rd);
if (rate_y == INT_MAX) continue;
- uv_tx = get_uv_tx_size_impl(mi->tx_size, bsize, pd->subsampling_x,
- pd->subsampling_y);
+ uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x]
+ [pd->subsampling_y];
if (rate_uv_intra[uv_tx] == INT_MAX) {
choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
&rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],