Commit 581636d7 authored by hui su

Refactor motion search code

1. Add "best_mv" in MACROBLOCK to store the best motion vector
during motion search, so that we don't need to pass its pointer
to various motion search functions.

2. Declare functions as static where possible.

3. Fix some indentation.

Change-Id: I0778146c0866cbc55e245988c59222577ea8260e
parent ebc2d34c
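The heart of the change is point 1 of the commit message: the motion search routines used to return their result through an MV* / int_mv* out-parameter (dst_mv, tmp_mv, new_mv), and after this commit they write the result into the new x->best_mv field, which the caller reads back. Below is a minimal, self-contained sketch of that before/after pattern; the simplified MV/int_mv/MACROBLOCK types and the search_old()/search_new() helpers are illustrative stand-ins, not the actual libvpx implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins only: the real MACROBLOCK carries far more
 * encoder state, and the real search routines take many more arguments. */
typedef struct { int16_t row, col; } MV;
typedef union { uint32_t as_int; MV as_mv; } int_mv;
typedef struct {
  int_mv best_mv;                  /* field added by this commit */
} MACROBLOCK;

/* Before: the best MV is returned through an out-parameter. */
static int search_old(MACROBLOCK *x, const MV *ref_mv, MV *dst_mv) {
  (void)x;
  dst_mv->row = ref_mv->row + 1;   /* pretend the search moved by one row */
  dst_mv->col = ref_mv->col;
  return 100;                      /* pretend error/SAD of the best match */
}

/* After: the best MV is stored in x->best_mv, so the extra pointer no
 * longer has to be threaded through every routine and call site. */
static int search_new(MACROBLOCK *x, const MV *ref_mv) {
  x->best_mv.as_mv.row = ref_mv->row + 1;
  x->best_mv.as_mv.col = ref_mv->col;
  return 100;
}

int main(void) {
  MACROBLOCK x;
  const MV ref = { 4, -2 };
  MV old_result;

  search_old(&x, &ref, &old_result);
  search_new(&x, &ref);

  /* Callers such as do_16x16_motion_search() and single_motion_search()
   * now read x->best_mv after the call instead of passing a pointer in. */
  printf("old API: (%d,%d)  new API: (%d,%d)\n",
         old_result.row, old_result.col,
         x.best_mv.as_mv.row, x.best_mv.as_mv.col);
  return 0;
}

The tradeoff is that best_mv becomes shared state on MACROBLOCK, so each caller must copy the result out (as the diff does into seg_mvs, frame_mv, single_newmvs, stats->ref[GOLDEN_FRAME].m.mv, etc.) before the next search overwrites it.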
@@ -165,6 +165,9 @@ struct macroblock {
// Used to store sub partition's choices.
MV pred_mv[MAX_REF_FRAMES];
// Store the best motion vector during motion search
int_mv best_mv;
// Strong color activity detection. Used in RTC coding mode to enhance
// the visual quality at the boundary of moving color objects.
uint8_t color_sensitivity[2];
@@ -447,7 +447,7 @@ typedef struct VP10_COMP {
ActiveMap active_map;
fractional_mv_step_fp *find_fractional_mv_step;
vp10_full_search_fn_t full_search_sad;
vp10_full_search_fn_t full_search_sad; // It is currently unused.
vp10_diamond_search_fn_t diamond_search_sad;
vp10_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
uint64_t time_receive_data;
@@ -25,7 +25,6 @@
static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
const MV *ref_mv,
MV *dst_mv,
int mb_row,
int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
@@ -51,8 +50,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
/*cpi->sf.search_method == HEX*/
vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
cond_cost_list(cpi, cost_list),
&v_fn_ptr, 0, ref_mv, dst_mv);
cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv);
// Try sub-pixel MC
// if (bestsme > error_thresh && bestsme < INT_MAX)
@@ -60,7 +58,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(
x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
x, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
cond_cost_list(cpi, cost_list),
NULL, NULL,
@@ -74,7 +72,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
#endif // CONFIG_EXT_INTER
xd->mi[0]->mbmi.mode = NEWMV;
xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;
xd->mi[0]->mbmi.mv[0] = x->best_mv;
#if CONFIG_EXT_INTER
xd->mi[0]->mbmi.ref_frame[1] = NONE;
#endif // CONFIG_EXT_INTER
@@ -92,40 +90,40 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi,
}
static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv,
int_mv *dst_mv, int mb_row, int mb_col) {
int mb_row, int mb_col) {
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
unsigned int err, tmp_err;
MV tmp_mv;
MV best_mv;
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride);
dst_mv->as_int = 0;
best_mv.col = best_mv.row = 0;
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col);
tmp_err = do_16x16_motion_iteration(cpi, ref_mv, mb_row, mb_col);
if (tmp_err < err) {
err = tmp_err;
dst_mv->as_mv = tmp_mv;
best_mv = x->best_mv.as_mv;
}
// If the current best reference mv is not centered on 0,0 then do a 0,0
// based search as well.
if (ref_mv->row != 0 || ref_mv->col != 0) {
unsigned int tmp_err;
MV zero_ref_mv = {0, 0}, tmp_mv;
MV zero_ref_mv = {0, 0};
tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv,
mb_row, mb_col);
tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, mb_row, mb_col);
if (tmp_err < err) {
dst_mv->as_mv = tmp_mv;
err = tmp_err;
best_mv = x->best_mv.as_mv;
}
}
x->best_mv.as_mv = best_mv;
return err;
}
@@ -213,8 +211,8 @@ static void update_mbgraph_mb_stats
xd->plane[0].pre[0].stride = golden_ref->y_stride;
g_motion_error = do_16x16_motion_search(cpi,
prev_golden_ref_mv,
&stats->ref[GOLDEN_FRAME].m.mv,
mb_row, mb_col);
stats->ref[GOLDEN_FRAME].m.mv = x->best_mv;
stats->ref[GOLDEN_FRAME].err = g_motion_error;
} else {
stats->ref[GOLDEN_FRAME].err = INT_MAX;
[The diff for this file is collapsed and not shown.]
@@ -82,31 +82,24 @@ int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
// Perform integral projection based motion estimation.
unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
MACROBLOCK *x,
BLOCK_SIZE bsize,
int mi_row, int mi_col);
typedef int (integer_mv_pattern_search_fn) (
const MACROBLOCK *x,
MV *ref_mv,
int search_param,
int error_per_bit,
int do_init_search,
int *cost_list,
const vp10_variance_fn_ptr_t *vf,
int use_mvcost,
const MV *center_mv,
MV *best_mv);
MACROBLOCK *x,
BLOCK_SIZE bsize,
int mi_row, int mi_col);
integer_mv_pattern_search_fn vp10_hex_search;
integer_mv_pattern_search_fn vp10_bigdia_search;
integer_mv_pattern_search_fn vp10_square_search;
integer_mv_pattern_search_fn vp10_fast_hex_search;
integer_mv_pattern_search_fn vp10_fast_dia_search;
int vp10_hex_search(MACROBLOCK *x,
MV *start_mv,
int search_param,
int sad_per_bit,
int do_init_search,
int *cost_list,
const vp10_variance_fn_ptr_t *vfp,
int use_mvcost,
const MV *center_mv);
typedef int (fractional_mv_step_fp) (
const MACROBLOCK *x,
MV *bestmv, const MV *ref_mv,
MACROBLOCK *x,
const MV *ref_mv,
int allow_hp,
int error_per_bit,
const vp10_variance_fn_ptr_t *vfp,
@@ -124,39 +117,32 @@ extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x,
const MV *ref_mv, int sad_per_bit,
int distance,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv);
typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x,
MV *ref_mv, int sad_per_bit,
int distance,
const MV *ref_mv, int sad_per_bit,
int distance,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, MV *best_mv);
typedef int (*vp10_diamond_search_fn_t)(const MACROBLOCK *x,
const search_site_config *cfg,
MV *ref_mv, MV *best_mv,
int search_param, int sad_per_bit,
int *num00,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv);
typedef int (*vp10_diamond_search_fn_t)(const MACROBLOCK *x,
const search_site_config *cfg,
MV *ref_mv, MV *best_mv,
int search_param, int sad_per_bit,
int *num00,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv);
int vp10_refining_search_8p_c(const MACROBLOCK *x,
MV *ref_mv, int error_per_bit,
int search_range,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, const uint8_t *second_pred);
int vp10_refining_search_8p_c(MACROBLOCK *x,
int error_per_bit,
int search_range,
const vp10_variance_fn_ptr_t *fn_ptr,
const MV *center_mv, const uint8_t *second_pred);
struct VP10_COMP;
int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, MV *mvp_full,
int step_param, int error_per_bit,
int *cost_list,
const MV *ref_mv, MV *tmp_mv,
int var_max, int rd);
BLOCK_SIZE bsize, MV *mvp_full,
int step_param, int error_per_bit,
int *cost_list, const MV *ref_mv,
int var_max, int rd);
#if CONFIG_EXT_INTER
int vp10_find_best_masked_sub_pixel_tree(const MACROBLOCK *x,
@@ -4753,7 +4753,7 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
struct buf_2d ref_yv12[2];
int bestsme = INT_MAX;
int sadpb = x->sadperbit16;
MV tmp_mv;
MV *const best_mv = &x->best_mv.as_mv;
int search_range = 3;
int tmp_col_min = x->mv_col_min;
@@ -4814,23 +4814,22 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
// Use the mv result from the single mode as mv predictor.
tmp_mv = frame_mv[refs[id]].as_mv;
*best_mv = frame_mv[refs[id]].as_mv;
tmp_mv.col >>= 3;
tmp_mv.row >>= 3;
best_mv->col >>= 3;
best_mv->row >>= 3;
#if CONFIG_REF_MV
vp10_set_mvcost(x, refs[id]);
#endif
// Small-range full-pixel motion search.
bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb,
search_range,
&cpi->fn_ptr[bsize],
&ref_mv[id].as_mv, second_pred);
bestsme = vp10_refining_search_8p_c(x, sadpb, search_range,
&cpi->fn_ptr[bsize],
&ref_mv[id].as_mv, second_pred);
if (bestsme < INT_MAX)
bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
bestsme = vp10_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -4859,8 +4858,7 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
pd->pre[0].stride)) << 3];
bestsme = cpi->find_fractional_mv_step(
x, &tmp_mv,
&ref_mv[id].as_mv,
x, &ref_mv[id].as_mv,
cpi->common.allow_high_precision_mv,
x->errorperbit,
&cpi->fn_ptr[bsize],
@@ -4875,8 +4873,7 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
} else {
(void) block;
bestsme = cpi->find_fractional_mv_step(
x, &tmp_mv,
&ref_mv[id].as_mv,
x, &ref_mv[id].as_mv,
cpi->common.allow_high_precision_mv,
x->errorperbit,
&cpi->fn_ptr[bsize],
@@ -4893,7 +4890,7 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
xd->plane[0].pre[0] = ref_yv12[0];
if (bestsme < last_besterr[id]) {
frame_mv[refs[id]].as_mv = tmp_mv;
frame_mv[refs[id]].as_mv = *best_mv;
last_besterr[id] = bestsme;
} else {
break;
@@ -5196,11 +5193,6 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
run_mv_search)
#endif // CONFIG_EXT_INTER
) {
#if CONFIG_EXT_INTER
MV *const new_mv = &mode_mv[this_mode][0].as_mv;
#else
MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
#endif // CONFIG_EXT_INTER
int step_param = 0;
int bestsme = INT_MAX;
int sadpb = x->sadperbit4;
@@ -5268,8 +5260,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
bestsme = vp10_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
&bsi->ref_mv[0]->as_mv, new_mv,
INT_MAX, 1);
&bsi->ref_mv[0]->as_mv, INT_MAX, 1);
if (bestsme < INT_MAX) {
int distortion;
@@ -5294,9 +5285,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
pd->pre[0].stride)) << 3];
cpi->find_fractional_mv_step(
x,
new_mv,
&bsi->ref_mv[0]->as_mv,
x, &bsi->ref_mv[0]->as_mv,
cm->allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize],
cpi->sf.mv.subpel_force_stop,
@@ -5311,9 +5300,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
pd->pre[0] = backup_pred;
} else {
cpi->find_fractional_mv_step(
x,
new_mv,
&bsi->ref_mv[0]->as_mv,
x, &bsi->ref_mv[0]->as_mv,
cm->allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize],
cpi->sf.mv.subpel_force_stop,
@@ -5327,14 +5314,20 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
// save motion search result for use in compound prediction
#if CONFIG_EXT_INTER
seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_mv = *new_mv;
seg_mvs[i][mv_idx][mbmi->ref_frame[0]].as_mv = x->best_mv.as_mv;
#else
seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv;
seg_mvs[i][mbmi->ref_frame[0]].as_mv = x->best_mv.as_mv;
#endif // CONFIG_EXT_INTER
}
if (cpi->sf.adaptive_motion_search)
x->pred_mv[mbmi->ref_frame[0]] = *new_mv;
x->pred_mv[mbmi->ref_frame[0]] = x->best_mv.as_mv;
#if CONFIG_EXT_INTER
mode_mv[this_mode][0] = x->best_mv;
#else
mode_mv[NEWMV][0] = x->best_mv;
#endif // CONFIG_EXT_INTER
// restore src pointers
mi_buf_restore(x, orig_src, orig_pre);
@@ -5903,7 +5896,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
int ref_idx,
int mv_idx,
#endif // CONFIG_EXT_INTER
int_mv *tmp_mv, int *rate_mv) {
int *rate_mv) {
MACROBLOCKD *xd = &x->e_mbd;
const VP10_COMMON *cm = &cpi->common;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -5985,7 +5978,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
x->pred_mv[ref].row = 0;
x->pred_mv[ref].col = 0;
tmp_mv->as_int = INVALID_MV;
x->best_mv.as_int = INVALID_MV;
if (scaled_ref_frame) {
int i;
@@ -6005,7 +5998,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
cond_cost_list(cpi, cost_list),
&ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
&ref_mv, INT_MAX, 1);
x->mv_col_min = tmp_col_min;
x->mv_col_max = tmp_col_max;
@@ -6027,7 +6020,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
upsampled_ref->y_stride, (mi_row << 3), (mi_col << 3),
NULL, pd->subsampling_x, pd->subsampling_y);
bestsme = cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
bestsme = cpi->find_fractional_mv_step(x, &ref_mv,
cm->allow_high_precision_mv,
x->errorperbit,
&cpi->fn_ptr[bsize],
@@ -6041,7 +6034,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
// Restore the reference frames.
pd->pre[ref_idx] = backup_pred;
} else {
cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
cpi->find_fractional_mv_step(x, &ref_mv,
cm->allow_high_precision_mv,
x->errorperbit,
&cpi->fn_ptr[bsize],
@@ -6052,11 +6045,11 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
&dis, &x->pred_sse[ref], NULL, 0, 0, 0);
}
}
*rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv,
*rate_mv = vp10_mv_bit_cost(&x->best_mv.as_mv, &ref_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
if (cpi->sf.adaptive_motion_search)
x->pred_mv[ref] = tmp_mv->as_mv;
x->pred_mv[ref] = x->best_mv.as_mv;
if (scaled_ref_frame) {
int i;
@@ -6990,34 +6983,32 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
}
#endif // CONFIG_EXT_INTER
} else {
int_mv tmp_mv;
#if CONFIG_EXT_INTER
if (is_comp_interintra_pred) {
tmp_mv = single_newmvs[mv_idx][refs[0]];
x->best_mv = single_newmvs[mv_idx][refs[0]];
rate_mv = single_newmvs_rate[mv_idx][refs[0]];
} else {
single_motion_search(cpi, x, bsize, mi_row, mi_col,
0, mv_idx, &tmp_mv, &rate_mv);
single_newmvs[mv_idx][refs[0]] = tmp_mv;
0, mv_idx, &rate_mv);
single_newmvs[mv_idx][refs[0]] = x->best_mv;
single_newmvs_rate[mv_idx][refs[0]] = rate_mv;
}
#else
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
single_newmv[refs[0]] = tmp_mv;
single_motion_search(cpi, x, bsize, mi_row, mi_col, &rate_mv);
single_newmv[refs[0]] = x->best_mv;
#endif // CONFIG_EXT_INTER
if (tmp_mv.as_int == INVALID_MV)
if (x->best_mv.as_int == INVALID_MV)
return INT64_MAX;
frame_mv[refs[0]] = tmp_mv;
xd->mi[0]->bmi[0].as_mv[0] = tmp_mv;
frame_mv[refs[0]] = x->best_mv;
xd->mi[0]->bmi[0].as_mv[0] = x->best_mv;
// Estimate the rate implications of a new mv but discount this
// under certain circumstances where we want to help initiate a weak
// motion field, where the distortion gain for a single block may not
// be enough to overcome the cost of a new mv.
if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
if (discount_newmv_test(cpi, this_mode, x->best_mv, mode_mv, refs[0])) {
rate_mv = VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
}
}
@@ -288,7 +288,6 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
MV best_ref_mv1 = {0, 0};
MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;
// Save input state
struct buf_2d src = x->plane[0].src;
@@ -315,12 +314,11 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
// Ignore mv costing by sending NULL pointer instead of cost arrays
vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
cond_cost_list(cpi, cost_list),
&cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv);
cond_cost_list(cpi, cost_list),
&cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1);
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(x, ref_mv,
&best_ref_mv1,
bestsme = cpi->find_fractional_mv_step(x, &best_ref_mv1,
cpi->common.allow_high_precision_mv,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
@@ -329,6 +327,8 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
NULL, NULL,
&distortion, &sse, NULL, 0, 0, 0);
x->e_mbd.mi[0]->bmi[0].as_mv[0] = x->best_mv;
// Restore input state
x->plane[0].src = src;
xd->plane[0].pre[0] = pre;