Commit caed92d0 authored by Scott LaVarnway's avatar Scott LaVarnway Committed by Yaowu Xu
Browse files

Import a decoder bug fix from public stable branch

Please see the following public commit for details:
https://gerrit.chromium.org/gerrit/#change,7608

Change-Id: I589eed0b6078e2c5c9c74e942886e503bd02b273
parent 3ff8c7d9
...@@ -373,6 +373,42 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x) ...@@ -373,6 +373,42 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x)
} }
} }
static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* When an MV reaches so deep into the UMV border that none of the
     * visible pixels contribute to reconstruction, its subpel bits carry
     * no information: pinning the MV at 16 pixels yields an equivalent
     * prediction.
     *
     * The threshold is 19 pixels on the top and left edges (16 pixels
     * plus the 3 filter taps right of the central pixel during subpel
     * filtering). The bottom and right edges trip at 18 pixels (16
     * pixels plus the 2 taps left of the central pixel).
     */
    const int left_limit   = xd->mb_to_left_edge   - (16 << 3);
    const int right_limit  = xd->mb_to_right_edge  + (16 << 3);
    const int top_limit    = xd->mb_to_top_edge    - (16 << 3);
    const int bottom_limit = xd->mb_to_bottom_edge + (16 << 3);

    /* Horizontal component. */
    if (mv->col < xd->mb_to_left_edge - (19 << 3))
        mv->col = left_limit;
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = right_limit;

    /* Vertical component. */
    if (mv->row < xd->mb_to_top_edge - (19 << 3))
        mv->row = top_limit;
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = bottom_limit;
}
/* A version of the above function for chroma block MVs.*/
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* Chroma variant of clamp_mv_to_umv_border(): chroma MVs operate at
     * half resolution, so each component is doubled before comparison
     * against the luma-domain edge thresholds, and the clamped limit is
     * halved before being stored back. The four checks run in sequence,
     * each observing any update made by the previous one, exactly as the
     * chained conditional assignments did.
     */
    if (2 * mv->col < xd->mb_to_left_edge - (19 << 3))
        mv->col = (xd->mb_to_left_edge - (16 << 3)) >> 1;
    if (2 * mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = (xd->mb_to_right_edge + (16 << 3)) >> 1;
    if (2 * mv->row < xd->mb_to_top_edge - (19 << 3))
        mv->row = (xd->mb_to_top_edge - (16 << 3)) >> 1;
    if (2 * mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = (xd->mb_to_bottom_edge + (16 << 3)) >> 1;
}
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
unsigned char *dst_y, unsigned char *dst_y,
unsigned char *dst_u, unsigned char *dst_u,
...@@ -384,17 +420,23 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, ...@@ -384,17 +420,23 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
unsigned char *ptr; unsigned char *ptr;
unsigned char *uptr, *vptr; unsigned char *uptr, *vptr;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; int_mv _16x16mv;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
unsigned char *ptr_base = x->pre.y_buffer; unsigned char *ptr_base = x->pre.y_buffer;
int pre_stride = x->block[0].pre_stride; int pre_stride = x->block[0].pre_stride;
ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;
if ((mv_row | mv_col) & 7) if (x->mode_info_context->mbmi.need_to_clamp_mvs)
{
clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
}
ptr = ptr_base + ( _16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
if ( _16x16mv.as_int & 0x00070007)
{ {
x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride); x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
} }
else else
{ {
...@@ -402,31 +444,31 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, ...@@ -402,31 +444,31 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
} }
/* calc uv motion vectors */ /* calc uv motion vectors */
if (mv_row < 0) if ( _16x16mv.as_mv.row < 0)
mv_row -= 1; _16x16mv.as_mv.row -= 1;
else else
mv_row += 1; _16x16mv.as_mv.row += 1;
if (mv_col < 0) if (_16x16mv.as_mv.col < 0)
mv_col -= 1; _16x16mv.as_mv.col -= 1;
else else
mv_col += 1; _16x16mv.as_mv.col += 1;
mv_row /= 2; _16x16mv.as_mv.row /= 2;
mv_col /= 2; _16x16mv.as_mv.col /= 2;
mv_row &= x->fullpixel_mask; _16x16mv.as_mv.row &= x->fullpixel_mask;
mv_col &= x->fullpixel_mask; _16x16mv.as_mv.col &= x->fullpixel_mask;
pre_stride >>= 1; pre_stride >>= 1;
offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); offset = ( _16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
uptr = x->pre.u_buffer + offset; uptr = x->pre.u_buffer + offset;
vptr = x->pre.v_buffer + offset; vptr = x->pre.v_buffer + offset;
if ((mv_row | mv_col) & 7) if ( _16x16mv.as_int & 0x00070007)
{ {
x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride); x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride); x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
} }
else else
{ {
...@@ -515,6 +557,14 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) ...@@ -515,6 +557,14 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
x->block[ 8].bmi = x->mode_info_context->bmi[ 8]; x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
x->block[10].bmi = x->mode_info_context->bmi[10]; x->block[10].bmi = x->mode_info_context->bmi[10];
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
{
clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
}
build_inter_predictors4b(x, &x->block[ 0], 16); build_inter_predictors4b(x, &x->block[ 0], 16);
build_inter_predictors4b(x, &x->block[ 2], 16); build_inter_predictors4b(x, &x->block[ 2], 16);
build_inter_predictors4b(x, &x->block[ 8], 16); build_inter_predictors4b(x, &x->block[ 8], 16);
...@@ -530,6 +580,12 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) ...@@ -530,6 +580,12 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
x->block[i+0].bmi = x->mode_info_context->bmi[i+0]; x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
x->block[i+1].bmi = x->mode_info_context->bmi[i+1]; x->block[i+1].bmi = x->mode_info_context->bmi[i+1];
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
{
clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
}
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
build_inter_predictors2b(x, d0, 16); build_inter_predictors2b(x, d0, 16);
else else
...@@ -592,6 +648,9 @@ void build_4x4uvmvs(MACROBLOCKD *x) ...@@ -592,6 +648,9 @@ void build_4x4uvmvs(MACROBLOCKD *x)
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask; x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
x->block[voffset].bmi.mv.as_mv.row = x->block[voffset].bmi.mv.as_mv.row =
x->block[uoffset].bmi.mv.as_mv.row ; x->block[uoffset].bmi.mv.as_mv.row ;
x->block[voffset].bmi.mv.as_mv.col = x->block[voffset].bmi.mv.as_mv.col =
......
...@@ -158,57 +158,6 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) ...@@ -158,57 +158,6 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
} }
static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    /* Horizontal clamp: pin col to 16 pixels inside the left/right UMV
     * border once it crosses the 19/18-pixel trigger distance. */
    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    /* Vertical clamp: same scheme for row against top/bottom edges.
     * Edge distances and MVs are in 1/8-pel units, hence the << 3. */
    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}
/* A version of the above function for chroma block MVs.*/
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* Chroma MVs are at half resolution, so each component is doubled
     * (2*mv) before comparison against the luma-domain edge thresholds,
     * and the clamped luma limit is halved (>> 1) before storing.
     * The four conditional assignments run in sequence; each reads the
     * value possibly updated by the previous one. */
    mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ? (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ? (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;
    mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ? (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ? (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}
void clamp_mvs(MACROBLOCKD *xd)
{
    /* Clamp the MVs that will be used for prediction against the UMV
     * border. For SPLITMV every per-block MV is clamped: luma blocks
     * 0-15 with the luma routine, chroma blocks 16-23 with the chroma
     * routine. For all other modes the single macroblock-level MV and
     * the shared chroma MV in block 16 are clamped instead.
     */
    if (xd->mode_info_context->mbmi.mode != SPLITMV)
    {
        clamp_mv_to_umv_border(&xd->mode_info_context->mbmi.mv.as_mv, xd);
        clamp_uvmv_to_umv_border(&xd->block[16].bmi.mv.as_mv, xd);
        return;
    }

    {
        int i;

        for (i = 0; i < 24; i++)
        {
            if (i < 16)
                clamp_mv_to_umv_border(&xd->block[i].bmi.mv.as_mv, xd);
            else
                clamp_uvmv_to_umv_border(&xd->block[i].bmi.mv.as_mv, xd);
        }
    }
}
extern const int vp8_i8x8_block[4]; extern const int vp8_i8x8_block[4];
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
unsigned int mb_idx) unsigned int mb_idx)
...@@ -256,12 +205,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, ...@@ -256,12 +205,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
#endif #endif
} }
/* Perform temporary clamping of the MV to be used for prediction */
if (xd->mode_info_context->mbmi.need_to_clamp_mvs)
{
clamp_mvs(xd);
}
mode = xd->mode_info_context->mbmi.mode; mode = xd->mode_info_context->mbmi.mode;
if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#endif #endif
extern void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd); extern void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
extern void clamp_mvs(MACROBLOCKD *xd);
#if CONFIG_RUNTIME_CPU_DETECT #if CONFIG_RUNTIME_CPU_DETECT
#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x) #define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
...@@ -109,7 +108,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m ...@@ -109,7 +108,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
{ {
int eobtotal = 0; int eobtotal = 0;
int throw_residual = 0; int throw_residual = 0;
int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs; int i;
if (xd->mode_info_context->mbmi.mb_skip_coeff) if (xd->mode_info_context->mbmi.mb_skip_coeff)
{ {
...@@ -120,12 +119,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m ...@@ -120,12 +119,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
eobtotal = vp8_decode_mb_tokens(pbi, xd); eobtotal = vp8_decode_mb_tokens(pbi, xd);
} }
/* Perform temporary clamping of the MV to be used for prediction */
if (do_clamp)
{
clamp_mvs(xd);
}
eobtotal |= (xd->mode_info_context->mbmi.mode == B_PRED || eobtotal |= (xd->mode_info_context->mbmi.mode == B_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV); xd->mode_info_context->mbmi.mode == SPLITMV);
if (!eobtotal && !vp8dx_bool_error(xd->current_bc)) if (!eobtotal && !vp8dx_bool_error(xd->current_bc))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment