Commit f6f0ffe9 authored by John Koleszar, committed by Code Review

Merge "Remove secondary mv clamping from decode stage"

parents c97a9fc1 3085025f
......@@ -174,9 +174,8 @@ typedef struct
     int dc_diff;
     unsigned char segment_id;    // Which set of segmentation parameters should be used for this MB
     int force_no_skip;
+    int need_to_clamp_mvs;
     B_MODE_INFO partition_bmi[16];
 } MB_MODE_INFO;
......
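
The struct change above is the heart of the commit: instead of clamping every parsed MV a second time in the decode loop, the decoder now records whether a macroblock's MVs escape the bordered frame and defers the actual clamp to the prediction stage. A minimal sketch of that flag's lifecycle, with hypothetical names (mb_sketch, parse_stage, predict_stage are illustrative, not libvpx API; the range [-128, 128] stands in for the real edge-plus-margin bounds):

    #include <stdio.h>

    /* Hypothetical stand-in for MB_MODE_INFO; names are illustrative only. */
    typedef struct
    {
        int mv;                  /* one MV component, eighth-pel units */
        int need_to_clamp_mvs;   /* set while parsing, read while reconstructing */
    } mb_sketch;

    /* Parse stage: record the (cheap) range test; the coded MV is untouched. */
    static void parse_stage(mb_sketch *mb, int lo, int hi)
    {
        mb->need_to_clamp_mvs = (mb->mv < lo || mb->mv > hi);
    }

    /* Prediction stage: clamp a private copy, leaving the coded MV intact
     * for the MV predictors and entropy context of later blocks. */
    static int predict_stage(const mb_sketch *mb, int lo, int hi)
    {
        int mv = mb->mv;

        if (mb->need_to_clamp_mvs)
            mv = mv < lo ? lo : (mv > hi ? hi : mv);
        return mv;   /* the MV actually used for prediction */
    }

    int main(void)
    {
        mb_sketch mb = { -200, 0 };   /* -200 eighth-pels: 25 px left, past the 16 px border */
        parse_stage(&mb, -128, 128);
        printf("coded=%d used=%d\n", mb.mv, predict_stage(&mb, -128, 128));
        return 0;
    }

The coded value stays untouched, which is what lets later blocks see exactly what was in the bitstream.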
......@@ -171,6 +171,7 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
     VP8_COMMON *const pc = &pbi->common;
     MACROBLOCKD *xd = &pbi->mb;
+    mbmi->need_to_clamp_mvs = 0;
     vp8dx_bool_decoder_fill(bc);
     // Distance of Mb to the various image edges.
......@@ -269,6 +270,17 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
                     break;
                 }

+                if (mv->col < xd->mb_to_left_edge - LEFT_TOP_MARGIN
+                    || mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN
+                    || mv->row < xd->mb_to_top_edge - LEFT_TOP_MARGIN
+                    || mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN
+                   )
+                    mbmi->need_to_clamp_mvs = 1;
+
                 /* Fill (uniform) modes, mvs of jth subset.
                    Must do it here because ensuing subsets can
                    refer back to us via "left" or "above". */
......@@ -325,27 +337,18 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
                 read_mv(bc, mv, (const MV_CONTEXT *) mvc);
                 mv->row += best_mv.row;
                 mv->col += best_mv.col;

-                /* Encoder should not produce invalid motion vectors, but since
-                 * arbitrary length MVs can be parsed from the bitstream, we
-                 * need to clamp them here in case we're reading bad data to
-                 * avoid a crash.
-                 */
-#if CONFIG_DEBUG
-                assert(mv->col >= (xd->mb_to_left_edge - LEFT_TOP_MARGIN));
-                assert(mv->col <= (xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN));
-                assert(mv->row >= (xd->mb_to_top_edge - LEFT_TOP_MARGIN));
-                assert(mv->row <= (xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN));
-#endif
-                if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
-                    mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
-                else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
-                    mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
-                if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
-                    mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
-                else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
-                    mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+                /* Don't need to check this on NEARMV and NEARESTMV modes
+                 * since those modes clamp the MV. The NEWMV mode does not,
+                 * so signal to the prediction stage whether special
+                 * handling may be required.
+                 */
+                if (mv->col < xd->mb_to_left_edge - LEFT_TOP_MARGIN
+                    || mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN
+                    || mv->row < xd->mb_to_top_edge - LEFT_TOP_MARGIN
+                    || mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN
+                   )
+                    mbmi->need_to_clamp_mvs = 1;

             propagate_mv:  /* same MV throughout */
                 {
......@@ -381,7 +384,6 @@ void vp8_decode_mode_mvs(VP8D_COMP *pbi)
assert(0);
#endif
}
}
else
{
......
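
In the hunks above, MV components and the xd->mb_to_*_edge values are held in eighth-pel (1/8 pixel) units, and the margins extend the legal range by the 16-pixel unrestricted-MV border. Assuming the usual libvpx definitions of LEFT_TOP_MARGIN and RIGHT_BOTTOM_MARGIN as (16 << 3), which is an assumption here rather than something shown in the diff, the new range test reduces to the helper below (a sketch for illustration, not the project's code):

    #include <assert.h>

    /* Assumed values: both margins taken as the 16-pixel UMV border
     * expressed in eighth-pel units, i.e. 16 << 3 == 128. */
    #define LEFT_TOP_MARGIN     (16 << 3)
    #define RIGHT_BOTTOM_MARGIN (16 << 3)

    /* Nonzero when an eighth-pel MV points outside the bordered frame,
     * mirroring the need_to_clamp_mvs test in the hunk above. */
    static int mv_needs_clamp(int row, int col,
                              int to_top, int to_bottom,  /* eighth-pel edges */
                              int to_left, int to_right)
    {
        return col < to_left   - LEFT_TOP_MARGIN
            || col > to_right  + RIGHT_BOTTOM_MARGIN
            || row < to_top    - LEFT_TOP_MARGIN
            || row > to_bottom + RIGHT_BOTTOM_MARGIN;
    }

    int main(void)
    {
        /* Macroblock flush against the left frame edge (mb_to_left_edge == 0):
         * any column below -128 (16 px) escapes the border and sets the flag. */
        assert(!mv_needs_clamp(0, -128, 0, 1000, 0, 1000));
        assert( mv_needs_clamp(0, -129, 0, 1000, 0, 1000));
        return 0;
    }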
/*
 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
......@@ -126,6 +126,47 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
}
}
+static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
+{
+    /* If the MV points so far into the UMV border that no visible pixels
+     * are used for reconstruction, the subpel part of the MV can be
+     * discarded and the MV limited to 16 pixels with equivalent results.
+     *
+     * This limit kicks in at 19 pixels for the top and left edges, for
+     * the 16 pixels plus 3 taps right of the central pixel when subpel
+     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
+     * left of the central pixel when filtering.
+     */
+    if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
+        mv->col = xd->mb_to_left_edge - (16 << 3);
+    else if (mv->col > xd->mb_to_right_edge + (18 << 3))
+        mv->col = xd->mb_to_right_edge + (16 << 3);
+
+    if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
+        mv->row = xd->mb_to_top_edge - (16 << 3);
+    else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
+        mv->row = xd->mb_to_bottom_edge + (16 << 3);
+}
+
+static void clamp_mvs(MACROBLOCKD *xd)
+{
+    if (xd->mbmi.mode == SPLITMV)
+    {
+        int i;
+
+        for (i=0; i<16; i++)
+            clamp_mv_to_umv_border(&xd->block[i].bmi.mv.as_mv, xd);
+    }
+    else
+    {
+        clamp_mv_to_umv_border(&xd->mbmi.mv.as_mv, xd);
+        clamp_mv_to_umv_border(&xd->block[16].bmi.mv.as_mv, xd);
+    }
+}
static void reconstruct_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
......@@ -233,6 +274,8 @@ static void de_quantand_idct(VP8D_COMP *pbi, MACROBLOCKD *xd)
 void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
 {
     int eobtotal = 0;
+    MV orig_mvs[24];
+    int i, do_clamp = xd->mbmi.need_to_clamp_mvs;

     if (xd->mbmi.mb_skip_coeff)
     {
......@@ -243,20 +286,50 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
         eobtotal = vp8_decode_mb_tokens(pbi, xd);
     }

-    xd->mode_info_context->mbmi.dc_diff = 1;
-
-    if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
-    {
-        xd->mode_info_context->mbmi.dc_diff = 0;
-        skip_recon_mb(pbi, xd);
-        return;
-    }
-
-    if (xd->segmentation_enabled)
-        mb_init_dequantizer(pbi, xd);
-
-    de_quantand_idct(pbi, xd);
-    reconstruct_mb(pbi, xd);
+    /* Perform temporary clamping of the MV to be used for prediction */
+    if (do_clamp)
+    {
+        if (xd->mbmi.mode == SPLITMV)
+            for (i=0; i<24; i++)
+                orig_mvs[i] = xd->block[i].bmi.mv.as_mv;
+        else
+        {
+            orig_mvs[0] = xd->mbmi.mv.as_mv;
+            orig_mvs[1] = xd->block[16].bmi.mv.as_mv;
+        }
+
+        clamp_mvs(xd);
+    }
+
+    xd->mode_info_context->mbmi.dc_diff = 1;
+
+    do {
+        if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
+        {
+            xd->mode_info_context->mbmi.dc_diff = 0;
+            skip_recon_mb(pbi, xd);
+            break;
+        }
+
+        if (xd->segmentation_enabled)
+            mb_init_dequantizer(pbi, xd);
+
+        de_quantand_idct(pbi, xd);
+        reconstruct_mb(pbi, xd);
+    } while(0);
+
+    /* Restore the original MV so as not to affect the entropy context. */
+    if (do_clamp)
+    {
+        if (xd->mbmi.mode == SPLITMV)
+            for (i=0; i<24; i++)
+                xd->block[i].bmi.mv.as_mv = orig_mvs[i];
+        else
+        {
+            xd->mbmi.mv.as_mv = orig_mvs[0];
+            xd->block[16].bmi.mv.as_mv = orig_mvs[1];
+        }
+    }
 }
static int get_delta_q(vp8_reader *bc, int prev, int *q_update)
......@@ -314,7 +387,9 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
         // Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
-        vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi, 32); //sizeof(MB_MODE_INFO) );
+        // the partition_bmi array is unused in the decoder, so don't copy it.
+        vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
+                   sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
{
......
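
The thresholds in clamp_mv_to_umv_border follow from its own comment: the subpel filter reads 3 taps above/left of the central pixel and 2 below/right, so once an MV points more than 19 (top/left) or 18 (bottom/right) pixels into the 16-pixel border, every tap lands on border pixels and a full-pel 16-pixel MV gives an identical prediction. A small single-axis check of that arithmetic (a sketch under those assumptions, not the project's code):

    #include <assert.h>

    /* Sketch of clamp_mv_to_umv_border, row axis only; values are in
     * eighth-pel units as in the diff. */
    static int clamp_row(int row, int to_top, int to_bottom)
    {
        if (row < to_top - (19 << 3))          /* 16 px border + 3 filter taps */
            row = to_top - (16 << 3);          /* equivalent full-pel MV */
        else if (row > to_bottom + (18 << 3))  /* 16 px border + 2 filter taps */
            row = to_bottom + (16 << 3);
        return row;
    }

    int main(void)
    {
        /* Exactly at the 19 px limit: untouched, the subpel taps can still
         * reach a visible pixel. */
        assert(clamp_row(-(19 << 3), 0, 0) == -(19 << 3));
        /* One eighth-pel further: only border pixels are read, so snap to
         * the equivalent 16 px full-pel MV. */
        assert(clamp_row(-(19 << 3) - 1, 0, 0) == -(16 << 3));
        return 0;
    }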
/*
 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
......@@ -154,7 +154,9 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
}
         // Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
-        vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi, 32); //sizeof(MB_MODE_INFO) );
+        // the partition_bmi array is unused in the decoder, so don't copy it.
+        vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
+                   sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
{
......
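
Both files replace the magic memcpy length 32 with a sizeof expression that skips the trailing partition_bmi array, which the decoder never reads from this copy; the trick only works because partition_bmi is the last member of MB_MODE_INFO. A minimal illustration with a hypothetical stand-in struct (mb_info_like is not the real MB_MODE_INFO):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for MB_MODE_INFO: a small "hot" part the decoder
     * uses, followed by a large trailing array it does not. */
    typedef struct
    {
        int mode;
        int need_to_clamp_mvs;
        int partition_bmi[16];   /* must remain the LAST member for this trick */
    } mb_info_like;

    int main(void)
    {
        mb_info_like src = { 3, 1, {0} };
        mb_info_like dst = { 0, 0, {0} };

        /* Copy everything except the trailing array, as both hunks do. */
        memcpy(&dst, &src, sizeof(mb_info_like) - sizeof(src.partition_bmi));

        printf("mode=%d clamp=%d\n", dst.mode, dst.need_to_clamp_mvs);
        return 0;
    }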