Commit 13685747 authored by John Koleszar

Centralize mb skip state calculation

This patch moves the scattered updates to the mb skip state
(mode_info_context->mbmi.mb_skip_coeff) to vp8_tokenize_mb. Recent
changes to the quantizer exposed a bug where if a macroblock
could be coded as a skip but isn't, the encoder would run the
loopfilter but the decoder wouldn't, causing a reference buffer
mismatch.
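
As a minimal illustration (plain C, not libvpx code; the one-pixel reconstruct() model is invented for this sketch), consider how a disagreement on the filter decision propagates into the reference buffers:

    #include <stdio.h>

    /* Toy stand-in for reconstruction: filtering nudges the pixel, so any
     * disagreement about whether to filter changes the stored reference. */
    static int reconstruct(int pixel, int filter)
    {
        return filter ? pixel - 1 : pixel;
    }

    int main(void)
    {
        int pixel = 128;

        /* Encoder latched "not a skip" before the coefficients were zeroed;
         * the decoder sees no coefficients, so it reaches the opposite answer. */
        int encoder_skip = 0;
        int decoder_skip = 1;

        int encoder_ref = reconstruct(pixel, !encoder_skip);
        int decoder_ref = reconstruct(pixel, !decoder_skip);

        printf("encoder ref %d vs decoder ref %d: %s\n",
               encoder_ref, decoder_ref,
               encoder_ref == decoder_ref ? "match" : "mismatch");
        return 0;
    }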

The loopfilter is controlled by a flag called dc_diff. The decoder
looks at the number of decoded coefficients when setting this flag.
The encoder sets this flag based on the skip state, since any
skippable macroblock should be transmitted as a skip. The coefficient
optimization pass (vp8_optimize_b()) could change the coefficients
such that a block that was not a skip becomes one. The encoder was
not updating the skip state in this situation for intra coded blocks.
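
The interaction can be simulated with the eob counts alone. A minimal sketch (hypothetical eob values; mb_is_skippable_sim mirrors the mb_is_skippable() added by this patch but works on a plain array instead of libvpx types): a macroblock that is not skippable after quantization becomes skippable once the optimization pass zeroes its last AC coefficient, so a skip flag latched before that pass is stale.

    #include <stdio.h>

    /* With a Y2 block, a luma block is "empty" if only its DC coefficient
     * survives (eob < 2), since the DC is carried in the Y2 block; every
     * other block must have eob == 0. Same rule as mb_is_skippable(). */
    static int mb_is_skippable_sim(const int eob[25], int has_y2_block)
    {
        int skip = 1;
        int i = 0;

        if (has_y2_block)
        {
            for (i = 0; i < 16; i++)
                skip &= (eob[i] < 2);
        }

        for (; i < 24 + has_y2_block; i++)
            skip &= (!eob[i]);

        return skip;
    }

    int main(void)
    {
        int eob[25] = {0};
        int skip_before, skip_after;

        eob[3] = 2;                 /* one luma block keeps an AC coefficient */
        skip_before = mb_is_skippable_sim(eob, 1);

        eob[3] = 1;                 /* trellis zeroes it; only the DC remains */
        skip_after = mb_is_skippable_sim(eob, 1);

        printf("skippable before optimize: %d, after: %d\n",
               skip_before, skip_after);
        return 0;
    }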

The underlying issue predates these changes, but the bug was recently
triggered by enabling trellis quantization on the Y2 block in commit
dcd29e36 and by changing the quantizer range control in commit 305be4e4.

Change-Id: I5cce5da0dbc2d22f7d79ee48149f01e868a64802
parent acff1627
@@ -1071,8 +1071,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
     error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
     vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
     rate += rateuv;
@@ -1139,8 +1137,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
     else
         Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
     if (Error4x4 < Error16x16)
     {
         x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
@@ -1237,8 +1233,6 @@ int vp8cx_encode_inter_macroblock
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
         vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
         if (xd->mode_info_context->mbmi.mode == B_PRED)
@@ -53,8 +53,6 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BLOCK
     x->quantize_b(be, b);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
     vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
     RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -70,8 +68,6 @@ void vp8_encode_intra4x4block_rd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x, BL
     x->quantize_b(be, b);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!b->eob);
     IDCT_INVOKE(&rtcd->common->idct, idct16)(b->dqcoeff, b->diff, 32);
     RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -157,8 +153,6 @@ void vp8_encode_intra16x16mbyrd(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     vp8_transform_intra_mby(x);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
     vp8_quantize_mby(x);
     vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@@ -547,35 +547,6 @@ void vp8_optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
 }
-static void vp8_find_mb_skip_coef(MACROBLOCK *x)
-{
-    int i;
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
-    if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
-    {
-        for (i = 0; i < 16; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (x->e_mbd.block[i].eob < 2);
-        }
-        for (i = 16; i < 25; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-        }
-    }
-    else
-    {
-        for (i = 0; i < 24; i++)
-        {
-            x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-        }
-    }
-}
 void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
 {
     int b;
@@ -663,10 +634,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
 #if !(CONFIG_REALTIME_ONLY)
     if (x->optimize && x->rddiv > 1)
-    {
         vp8_optimize_mb(x, rtcd);
-        vp8_find_mb_skip_coef(x);
-    }
 #endif
     vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@@ -273,17 +273,10 @@ void vp8_quantize_mby(MACROBLOCK *x)
         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
     for (i = 0; i < 16; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
-            (x->e_mbd.block[i].eob <= has_2nd_order);
-    }
     if(has_2nd_order)
-    {
         x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[24].eob);
-    }
 }
 void vp8_quantize_mb(MACROBLOCK *x)
@@ -292,13 +285,8 @@ void vp8_quantize_mb(MACROBLOCK *x)
     int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED
         && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
-    x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = 1;
     for (i = 0; i < 24+has_2nd_order; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &=
-            (x->e_mbd.block[i].eob <= (has_2nd_order && i<16));
-    }
 }
@@ -307,8 +295,5 @@ void vp8_quantize_mbuv(MACROBLOCK *x)
     int i;
     for (i = 16; i < 24; i++)
-    {
         x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
-        x->e_mbd.mode_info_context->mbmi.mb_skip_coeff &= (!x->e_mbd.block[i].eob);
-    }
 }
@@ -198,6 +198,28 @@ static void tokenize1st_order_b
 }
+static int mb_is_skippable(MACROBLOCKD *x)
+{
+    int has_y2_block;
+    int skip = 1;
+    int i = 0;
+
+    has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED
+                    && x->mode_info_context->mbmi.mode != SPLITMV);
+    if (has_y2_block)
+    {
+        for (i = 0; i < 16; i++)
+            skip &= (x->block[i].eob < 2);
+    }
+
+    for (; i < 24 + has_y2_block; i++)
+        skip &= (!x->block[i].eob);
+
+    return skip;
+}
 void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
 {
     ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@@ -223,6 +245,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
 #if 1
+    x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x);
     if (x->mode_info_context->mbmi.mb_skip_coeff)
     {
@@ -247,35 +270,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
     cpi->skip_false_count++;
 #endif
-#if 0
-    if (x->mbmi.mode == B_PRED || x->mbmi.mode == SPLITMV)
-    {
-        int i, skip = 1;
-        for (i = 0; i < 24; i++)
-            skip &= (!x->block[i].eob);
-        if (skip != x->mbmi.mb_skip_coeff)
-            skip += 0;
-        x->mbmi.mb_skip_coeff = skip;
-    }
-    else
-    {
-        int i, skip = 1;
-        for (i = 0; i < 16; i++)
-            skip &= (x->block[i].eob < 2);
-        for (i = 16; i < 25; i++)
-            skip &= (!x->block[i].eob);
-        if (skip != x->mbmi.mb_skip_coeff)
-            skip += 0;
-        x->mbmi.mb_skip_coeff = skip;
-    }
-    vpx_memcpy(cpi->coef_counts_backup, cpi->coef_counts, sizeof(cpi->coef_counts));
-#endif