Commit 1fcb5fc7 authored by Jingning Han

Refactor motion vector residual coding process

This commit separates the predicted motion vector from the nearestmv
motion vector in the coding process for both regular and sub8x8
block sizes.

Change-Id: I703490513b0194e6669ebf719352db015facb3e1
parent deb33056
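
In rough terms (a minimal sketch with hypothetical, simplified stand-in types and names such as b_mode_info_sketch and mv_residual, not the codec's actual definitions), the per-sub8x8 block info now carries its own pred_mv_s8 prediction array, and the coded MV residual is measured against that stored prediction rather than against the nearestmv candidate:

/* Minimal sketch, assuming simplified stand-in types; the real structures
 * contain many more fields. */
#include <stdint.h>

typedef union int_mv {
  uint32_t as_int;
  struct { int16_t row, col; } as_mv;
} int_mv;

typedef struct {
  int_mv as_mv[2];       /* coded motion vectors (first/second predictor) */
  int_mv pred_mv_s8[2];  /* per-sub8x8 predicted MVs, kept separate from nearestmv */
} b_mode_info_sketch;

/* The entropy-coded residual is the difference between the coded MV and the
 * stored prediction. */
int_mv mv_residual(int_mv coded, int_mv pred) {
  int_mv d;
  d.as_mv.row = (int16_t)(coded.as_mv.row - pred.as_mv.row);
  d.as_mv.col = (int16_t)(coded.as_mv.col - pred.as_mv.col);
  return d;
}

The hunks below make this split concrete: the field rename in the block-info struct, the decoder using ref_mv_s8/pred_mv_s8 as the reference for the MV difference, and the encoder-side packing and counting paths taking the prediction explicitly.
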
@@ -102,7 +102,7 @@ typedef struct {
   PREDICTION_MODE as_mode;
   int_mv as_mv[2]; // first, second inter predictor motion vectors
 #if CONFIG_REF_MV
-  int_mv pred_mv[2];
+  int_mv pred_mv_s8[2];
 #endif
 #if CONFIG_EXT_INTER
   int_mv ref_mv[2];
......
@@ -811,11 +811,12 @@ void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd,
     ref_mv_stack[above_count] = tmp_mv;
   }
-  for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx) {
-    mv_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
-    clamp_mv_ref(&mv_list[idx].as_mv,
+  for (idx = 0; idx < *ref_mv_count; ++idx)
+    clamp_mv_ref(&ref_mv_stack[idx].this_mv.as_mv,
                  xd->n8_w << 3, xd->n8_h << 3, xd);
-  }
+  for (idx = 0; idx < VPXMIN(MAX_MV_REF_CANDIDATES, *ref_mv_count); ++idx)
+    mv_list[idx].as_int = ref_mv_stack[idx].this_mv.as_int;
 #endif
   near_mv->as_int = 0;
......
@@ -156,7 +156,7 @@ static INLINE int_mv get_sub_block_pred_mv(const MODE_INFO *candidate,
                                            int search_col, int block_idx) {
   return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8
              ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
-                   .pred_mv[which_mv]
+                   .pred_mv_s8[which_mv]
              : candidate->mbmi.pred_mv[which_mv];
 }
 #endif
......
@@ -933,7 +933,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   BLOCK_SIZE bsize = mbmi->sb_type;
   int_mv *pred_mv = (bsize >= BLOCK_8X8) ?
-      mbmi->pred_mv : xd->mi[0]->bmi[block].pred_mv;
+      mbmi->pred_mv : xd->mi[0]->bmi[block].pred_mv_s8;
 #endif
   switch (mode) {
@@ -1366,6 +1366,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
       for (idx = 0; idx < 2; idx += num_4x4_w) {
         int_mv block[2];
         const int j = idy * 2 + idx;
+        int_mv ref_mv_s8[2];
 #if CONFIG_REF_MV
 #if CONFIG_EXT_INTER
         if (!is_compound)
@@ -1423,6 +1424,13 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
 #endif // CONFIG_EXT_INTER
         }
+        for (ref = 0; ref < 2; ++ref) {
+          ref_mv_s8[ref] = nearestmv[ref];
+        }
+#if CONFIG_EXT_INTER
+        (void)ref_mv_s8;
+#endif
         if (!assign_mv(cm, xd, b_mode,
 #if CONFIG_REF_MV
                        j,
@@ -1431,7 +1439,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
 #if CONFIG_EXT_INTER
                        ref_mv[mv_idx],
 #else
-                       nearestmv,
+                       ref_mv_s8,
 #endif // CONFIG_EXT_INTER
                        nearest_sub8x8, near_sub8x8,
                        is_compound, allow_hp, r)) {
@@ -1451,8 +1459,8 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
     }
 #if CONFIG_REF_MV
-    mbmi->pred_mv[0].as_int = mi->bmi[3].pred_mv[0].as_int;
-    mbmi->pred_mv[1].as_int = mi->bmi[3].pred_mv[1].as_int;
+    mbmi->pred_mv[0].as_int = mi->bmi[3].pred_mv_s8[0].as_int;
+    mbmi->pred_mv[1].as_int = mi->bmi[3].pred_mv_s8[1].as_int;
 #endif
     mi->mbmi.mode = b_mode;
@@ -1460,19 +1468,19 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
     mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
   } else {
     int ref;
+    int_mv ref_mv[2] = { nearestmv[0], nearestmv[1] };
     for (ref = 0; ref < 1 + is_compound && mbmi->mode == NEWMV; ++ref) {
-      int_mv ref_mv = nearestmv[ref];
 #if CONFIG_REF_MV
       uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
       if (xd->ref_mv_count[ref_frame_type] > 1) {
-        ref_mv = (ref == 0) ?
+        ref_mv[ref] = (ref == 0) ?
            xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].this_mv :
            xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].comp_mv;
-        clamp_mv_ref(&ref_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
-        lower_mv_precision(&ref_mv.as_mv, allow_hp);
+        clamp_mv_ref(&ref_mv[ref].as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+        lower_mv_precision(&ref_mv[ref].as_mv, allow_hp);
       }
 #endif
-      nearestmv[ref] = ref_mv;
+      nearestmv[ref] = ref_mv[ref];
     }
     xd->corrupted |= !assign_mv(cm, xd, mbmi->mode,
@@ -1484,7 +1492,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
                                 mbmi->mode == NEWFROMNEARMV ?
                                     nearmv : nearestmv,
 #else
-                                nearestmv,
+                                ref_mv,
 #endif // CONFIG_EXT_INTER
                                 nearestmv, nearmv, is_compound, allow_hp, r);
   }
......
@@ -1185,8 +1185,12 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
             vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
 #if CONFIG_EXT_INTER
                            &mi->bmi[j].ref_mv[ref].as_mv,
 #else
+#if CONFIG_REF_MV
+                           &mi->bmi[j].pred_mv_s8[ref].as_mv,
+#else
                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+#endif // CONFIG_REF_MV
 #endif // CONFIG_EXT_INTER
                            nmvc, allow_hp);
           }
......
@@ -268,6 +268,9 @@ void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
 #if CONFIG_EXT_INTER
 static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
                     const int_mv mvs[2],
+#if CONFIG_REF_MV
+                    const int_mv pred_mvs[2],
+#endif
                     nmv_context_counts *nmv_counts) {
   int i;
   PREDICTION_MODE mode = mbmi->mode;
@@ -285,6 +288,7 @@ static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
     int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
                                mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+    (void)pred_mvs;
 #endif
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   }
@@ -363,6 +367,9 @@ static void inc_mvs_sub8x8(const MODE_INFO *mi,
 #else
 static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
                     const int_mv mvs[2],
+#if CONFIG_REF_MV
+                    const int_mv pred_mvs[2],
+#endif
                     nmv_context_counts *nmv_counts) {
   int i;
 #if !CONFIG_REF_MV
@@ -374,8 +381,10 @@ static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
     int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
                                mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
     nmv_context_counts *counts = &nmv_counts[nmv_ctx];
-#endif
+    const MV *ref = &pred_mvs[i].as_mv;
+#else
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
+#endif
     const MV diff = {mvs[i].as_mv.row - ref->row,
                      mvs[i].as_mv.col - ref->col};
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
@@ -411,6 +420,7 @@ void vp10_update_mv_count(ThreadData *td) {
         if (mi->bmi[i].as_mode == NEWMV)
           inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv,
 #if CONFIG_REF_MV
+                  mi->bmi[i].pred_mv_s8,
                   td->counts->mv);
 #else
                   &td->counts->mv);
@@ -426,6 +436,7 @@ void vp10_update_mv_count(ThreadData *td) {
 #endif // CONFIG_EXT_INTER
     inc_mvs(mbmi, mbmi_ext, mbmi->mv,
 #if CONFIG_REF_MV
+            mbmi->pred_mv,
             td->counts->mv);
 #else
             &td->counts->mv);
......
@@ -4207,15 +4207,13 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
 #if CONFIG_REF_MV
   if (mode == NEWMV) {
-    mic->bmi[i].pred_mv[0].as_int =
-        mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_int;
+    mic->bmi[i].pred_mv_s8[0].as_int = best_ref_mv[0]->as_int;
     if (is_compound)
-      mic->bmi[i].pred_mv[1].as_int =
-          mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_int;
+      mic->bmi[i].pred_mv_s8[1].as_int = best_ref_mv[1]->as_int;
   } else {
-    mic->bmi[i].pred_mv[0].as_int = this_mv[0].as_int;
+    mic->bmi[i].pred_mv_s8[0].as_int = this_mv[0].as_int;
     if (is_compound)
-      mic->bmi[i].pred_mv[1].as_int = this_mv[1].as_int;
+      mic->bmi[i].pred_mv_s8[1].as_int = this_mv[1].as_int;
   }
 #endif
@@ -4355,6 +4353,9 @@ typedef struct {
   int64_t bsse;
   int64_t brdcost;
   int_mv mvs[2];
+#if CONFIG_REF_MV
+  int_mv pred_mv[2];
+#endif
 #if CONFIG_EXT_INTER
   int_mv ref_mv[2];
 #endif // CONFIG_EXT_INTER
@@ -4853,6 +4854,12 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
 #endif // CONFIG_EXT_INTER
                                      &frame_mv[NEARESTMV][frame],
                                      &frame_mv[NEARMV][frame]);
+#if CONFIG_REF_MV
+      if (ref_mv_count[ref] > 0)
+        bsi->ref_mv[ref] = &ref_mv_stack[ref][0].this_mv;
+#endif
 #if CONFIG_EXT_INTER
       mv_ref_list[0].as_int = frame_mv[NEARESTMV][frame].as_int;
       mv_ref_list[1].as_int = frame_mv[NEARMV][frame].as_int;
@@ -5138,6 +5145,16 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
               if (num_4x4_blocks_high > 1)
                 bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
                     mode_mv[this_mode][ref].as_int;
+#if CONFIG_REF_MV
+              bsi->rdstat[i][mode_idx].pred_mv[ref].as_int =
+                  mi->bmi[i].pred_mv_s8[ref].as_int;
+              if (num_4x4_blocks_wide > 1)
+                bsi->rdstat[i + 1][mode_idx].pred_mv[ref].as_int =
+                    mi->bmi[i].pred_mv_s8[ref].as_int;
+              if (num_4x4_blocks_high > 1)
+                bsi->rdstat[i + 2][mode_idx].pred_mv[ref].as_int =
+                    mi->bmi[i].pred_mv_s8[ref].as_int;
+#endif
 #if CONFIG_EXT_INTER
               bsi->rdstat[i][mode_idx].ref_mv[ref].as_int =
                   bsi->ref_mv[ref]->as_int;
@@ -5312,6 +5329,11 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x,
       mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
       if (has_second_ref(mbmi))
        mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
+#if CONFIG_REF_MV
+      mi->bmi[i].pred_mv_s8[0] = bsi->rdstat[i][mode_idx].pred_mv[0];
+      if (has_second_ref(mbmi))
+        mi->bmi[i].pred_mv_s8[1] = bsi->rdstat[i][mode_idx].pred_mv[1];
+#endif
 #if CONFIG_EXT_INTER
       mi->bmi[i].ref_mv[0].as_int = bsi->rdstat[i][mode_idx].ref_mv[0].as_int;
       if (has_second_rf)
@@ -9641,8 +9663,8 @@ void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
     mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
     mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
 #if CONFIG_REF_MV
-    mbmi->pred_mv[0].as_int = xd->mi[0]->bmi[3].pred_mv[0].as_int;
-    mbmi->pred_mv[1].as_int = xd->mi[0]->bmi[3].pred_mv[1].as_int;
+    mbmi->pred_mv[0].as_int = xd->mi[0]->bmi[3].pred_mv_s8[0].as_int;
+    mbmi->pred_mv[1].as_int = xd->mi[0]->bmi[3].pred_mv_s8[1].as_int;
 #endif
   }
......