Commit a2d5cdef authored by Timothy B. Terriberry's avatar Timothy B. Terriberry

cb4x4: Fix reset_skip_context() without chroma_2x2

reset_skip_context() was always clearing the entropy contexts for
all three color planes, using a block size that corresponded with
the luma plane.

However, when chroma_2x2 is disabled, then for sub-8x8 luma block
sizes, the corresponding chroma block size is always 4x4, and the
skip flag only affects the chroma blocks corresponding to the
upper-left luma block.

This patch makes reset_skip_context() reset the contexts that
actually correspond to the chroma blocks that are skipped (if any).
It also moves reset_skip_context() to av1_reset_skip_context() in
blockd.c, because blockd.h gets included before onyx_int.h, which
declares the required is_chroma_reference() function.
reset_skip_context() was too large and used in too many places to
be a reasonable candidate for inlining, anyway.

AWCY results on objective-1-fast:

cb4x4-fix-base@2017-05-11T06:26:50.159Z -> cb4x4-fix-reset_skip@2017-05-11T06:28:45.482Z
  PSNR | PSNR Cb | PSNR Cr | PSNR HVS |   SSIM | MS SSIM | CIEDE 2000
0.0301 |  0.1068 |  0.1463 |   0.0359 | 0.0260 |  0.0347 |     0.0479

A regression (near the noise range), but without this fix, the line
buffer size required by the entropy contexts will be doubled.

Change-Id: I12fa6e60d9c1c7c85927742775a346ea22b3193f
parent fe0fb1d9
......@@ -259,6 +259,36 @@ void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
}
#endif
/* Clear the above/left entropy contexts covered by a skipped block.
 *
 * With CONFIG_CB4X4, chroma planes are only reset when this mi position is
 * actually a chroma reference (is_chroma_reference()); otherwise only the
 * luma plane's contexts are cleared. Without CONFIG_CB4X4, all planes are
 * always reset and mi_row/mi_col are unused.
 */
void av1_reset_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
  int plane;
  int num_planes;
#if CONFIG_CB4X4
  const int has_chroma =
      is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
                          xd->plane[1].subsampling_y);
  num_planes = 1 + (MAX_MB_PLANE - 1) * has_chroma;
#else
  (void)mi_row;
  (void)mi_col;
  num_planes = MAX_MB_PLANE;
#endif
  for (plane = 0; plane < num_planes; ++plane) {
    struct macroblockd_plane *const pd = &xd->plane[plane];
#if CONFIG_CHROMA_2X2 || !CONFIG_CB4X4
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
#else
    /* Without chroma_2x2, a chroma block is never smaller than 4x4, so
     * clamp the per-plane block size accordingly before sizing the memsets. */
    const BLOCK_SIZE plane_bsize =
        AOMMAX(BLOCK_4X4, get_plane_block_size(bsize, pd));
#endif
    const int txs_wide = block_size_wide[plane_bsize] >> tx_size_wide_log2[0];
    const int txs_high = block_size_high[plane_bsize] >> tx_size_high_log2[0];
    memset(pd->above_context, 0, txs_wide * sizeof(ENTROPY_CONTEXT));
    memset(pd->left_context, 0, txs_high * sizeof(ENTROPY_CONTEXT));
  }
}
void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
int i;
......
......@@ -1137,17 +1137,8 @@ get_plane_block_size(BLOCK_SIZE bsize, const struct macroblockd_plane *pd) {
return ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y];
}
/* Zero the above/left entropy contexts of every plane for a skipped block
 * of size |bsize|. The number of context entries cleared per plane is the
 * plane block's width/height measured in minimum-size transform units. */
static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) {
  int plane;
  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *const pd = &xd->plane[plane];
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
    const int ctx_cols = block_size_wide[plane_bsize] >> tx_size_wide_log2[0];
    const int ctx_rows = block_size_high[plane_bsize] >> tx_size_high_log2[0];
    memset(pd->above_context, 0, ctx_cols * sizeof(ENTROPY_CONTEXT));
    memset(pd->left_context, 0, ctx_rows * sizeof(ENTROPY_CONTEXT));
  }
}
void av1_reset_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
typedef void (*foreach_transformed_block_visitor)(int plane, int block,
int blk_row, int blk_col,
......
......@@ -1709,9 +1709,11 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
#endif
#if CONFIG_CB4X4
if (mbmi->skip) reset_skip_context(xd, bsize);
if (mbmi->skip) av1_reset_skip_context(xd, mi_row, mi_col, bsize);
#else
if (mbmi->skip) reset_skip_context(xd, AOMMAX(BLOCK_8X8, bsize));
if (mbmi->skip) {
av1_reset_skip_context(xd, mi_row, mi_col, AOMMAX(BLOCK_8X8, bsize));
}
#endif
#if CONFIG_COEF_INTERLEAVE
......@@ -2408,7 +2410,7 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
set_skip_context(xd, mi_row, mi_col);
skip = read_skip(cm, xd, xd->mi[0]->mbmi.segment_id_supertx, r);
if (skip) {
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
} else {
#if CONFIG_EXT_TX
if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) >
......
......@@ -2522,12 +2522,13 @@ static void encode_sb(const AV1_COMP *const cpi, ThreadData *td,
if (!x->skip) {
int this_rate = 0;
av1_encode_sb_supertx((AV1_COMMON *)cm, x, bsize);
av1_tokenize_sb_supertx(cpi, td, tp, dry_run, bsize, rate);
av1_tokenize_sb_supertx(cpi, td, tp, dry_run, mi_row, mi_col, bsize,
rate);
if (rate) *rate += this_rate;
} else {
xd->mi[0]->mbmi.skip = 1;
if (!dry_run) td->counts->skip[av1_get_skip_context(xd)][1]++;
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
}
if (!dry_run) {
for (y_idx = 0; y_idx < mi_height; y_idx++)
......
......@@ -568,7 +568,7 @@ void av1_update_txb_context(const AV1_COMP *cpi, ThreadData *td,
(void)mi_col;
if (mbmi->skip) {
if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
return;
}
......
......@@ -689,7 +689,7 @@ void av1_tokenize_sb_vartx(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
if (mbmi->skip) {
if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
#if !CONFIG_LV_MAP
if (dry_run) *t = t_backup;
#endif
......@@ -764,7 +764,7 @@ void av1_tokenize_sb(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
struct tokenize_b_args arg = { cpi, td, t, 0 };
if (mbmi->skip) {
if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
return;
}
......@@ -839,8 +839,8 @@ void av1_tokenize_sb(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
#if CONFIG_SUPERTX
void av1_tokenize_sb_supertx(const AV1_COMP *cpi, ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate) {
TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate) {
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &td->mb.e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
......@@ -851,7 +851,7 @@ void av1_tokenize_sb_supertx(const AV1_COMP *cpi, ThreadData *td,
struct tokenize_b_args arg = { cpi, td, t, 0 };
if (mbmi->skip) {
if (!dry_run) td->counts->skip[ctx][1] += skip_inc;
reset_skip_context(xd, bsize);
av1_reset_skip_context(xd, mi_row, mi_col, bsize);
if (dry_run) *t = t_backup;
return;
}
......
......@@ -90,8 +90,8 @@ void av1_tokenize_sb(const struct AV1_COMP *cpi, struct ThreadData *td,
int *rate, const int mi_row, const int mi_col);
#if CONFIG_SUPERTX
void av1_tokenize_sb_supertx(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate);
#endif
extern const int16_t *av1_dct_value_cost_ptr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment