Commit 93748c3e authored by Jingning Han

Enable dynamic motion vector referencing for newmv mode

This commit enables dynamic motion vector predictor selection for NEWMV
mode. It allows the codec to select the best motion vector predictor for
motion vector residual coding within a rate-distortion optimization
framework. Compression performance is improved:
lowres  0.14%
midres  0.27%
hdres   0.24%

Change-Id: I6a601c74eb6cb0b71a613336d40363359f2edecd
parent 51f95129
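
For orientation before the diffs: the change lets a NEWMV block pick its motion vector predictor from the ranked reference-MV stack instead of always using entry 0. The encoder evaluates each candidate index in its rate-distortion loop and signals the winner with up to two context-coded flags (written in write_drl_idx, parsed in read_drl_idx); both sides then clamp and precision-reduce the selected stack entry before using it as the predictor for MV residual coding. The sketch below is illustrative only; it uses made-up plain-C types and a simple flag array in place of the arithmetic coder, and is not the libvpx API.

#include <assert.h>
#include <stdio.h>

/* Toy stand-ins for the codec structures: a ranked candidate stack and a
 * list of already-decoded binary flags in place of the arithmetic coder. */
typedef struct { int row, col; } toy_mv;

/* Encoder side: emit up to two flags for ref_mv_idx in {0, 1, 2}, mirroring
 * the structure of write_drl_idx (flag k means "the index is beyond k"). */
static int write_drl_bits(int ref_mv_idx, int ref_mv_count, int *bits) {
  int n = 0;
  assert(ref_mv_idx >= 0 && ref_mv_idx < 3);
  if (ref_mv_count > 1) {
    bits[n++] = (ref_mv_idx != 0);
    if (ref_mv_idx != 0 && ref_mv_count > 2)
      bits[n++] = (ref_mv_idx != 1);
  }
  return n;  /* number of flags written */
}

/* Decoder side: consume the same flags and recover the index, mirroring
 * read_drl_idx. */
static int read_drl_bits(int ref_mv_count, const int *bits, int n) {
  int idx = 0, pos = 0;
  if (ref_mv_count > 1 && pos < n && bits[pos++]) {
    idx = 1;
    if (ref_mv_count > 2 && pos < n && bits[pos++])
      idx = 2;
  }
  return idx;
}

int main(void) {
  const toy_mv stack[3] = { { 4, -8 }, { 6, -6 }, { 0, 0 } };  /* ranked candidates */
  const int ref_mv_count = 3;
  int idx;
  for (idx = 0; idx < 3; ++idx) {
    int bits[2];
    const int n = write_drl_bits(idx, ref_mv_count, bits);
    const int decoded = read_drl_bits(ref_mv_count, bits, n);
    printf("idx %d -> %d flag(s) -> decoded %d, predictor (%d,%d)\n",
           idx, n, decoded, stack[decoded].row, stack[decoded].col);
    assert(decoded == idx);
  }
  return 0;
}
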
@@ -155,7 +155,29 @@ static void read_drl_idx(const VP10_COMMON *cm,
   uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
   mbmi->ref_mv_idx = 0;
 
-  if (xd->ref_mv_count[ref_frame_type] > 2) {
+  if (xd->ref_mv_count[ref_frame_type] > 1 && mbmi->mode == NEWMV) {
+    uint8_t drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], 0);
+    vpx_prob drl_prob = cm->fc->drl_prob0[drl_ctx];
+
+    if (!vpx_read(r, drl_prob)) {
+      mbmi->ref_mv_idx = 0;
+      return;
+    }
+    mbmi->ref_mv_idx = 1;
+
+    if (xd->ref_mv_count[ref_frame_type] > 2) {
+      drl_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], 1);
+      drl_prob = cm->fc->drl_prob0[drl_ctx];
+      if (!vpx_read(r, drl_prob)) {
+        mbmi->ref_mv_idx = 1;
+        return;
+      }
+      mbmi->ref_mv_idx = 2;
+    }
+    return;
+  }
+
+  if (xd->ref_mv_count[ref_frame_type] > 2 && mbmi->mode == NEARMV) {
     uint8_t drl0_ctx = vp10_drl_ctx(xd->ref_mv_stack[ref_frame_type], 1);
     vpx_prob drl0_prob = cm->fc->drl_prob0[drl0_ctx];
     if (vpx_read(r, drl0_prob)) {
@@ -1243,7 +1265,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
 #endif // CONFIG_REF_MV && CONFIG_EXT_INTER
                               r, mode_ctx);
 #if CONFIG_REF_MV
-    if (mbmi->mode == NEARMV)
+    if (mbmi->mode == NEARMV || mbmi->mode == NEWMV)
       read_drl_idx(cm, xd, mbmi, r);
 #endif
   }
@@ -1436,6 +1458,22 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
     mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
     mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
   } else {
+    int ref;
+    for (ref = 0; ref < 1 + is_compound && mbmi->mode == NEWMV; ++ref) {
+      int_mv ref_mv = nearestmv[ref];
+#if CONFIG_REF_MV
+      uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+      if (xd->ref_mv_count[ref_frame_type] > 1) {
+        ref_mv = (ref == 0) ?
+            xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].this_mv :
+            xd->ref_mv_stack[ref_frame_type][mbmi->ref_mv_idx].comp_mv;
+        clamp_mv_ref(&ref_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+        lower_mv_precision(&ref_mv.as_mv, allow_hp);
+      }
+#endif
+      nearestmv[ref] = ref_mv;
+    }
 
     xd->corrupted |= !assign_mv(cm, xd, mbmi->mode,
 #if CONFIG_REF_MV
+                                0,
......
@@ -193,7 +193,31 @@ static void write_drl_idx(const VP10_COMMON *cm,
                           const MB_MODE_INFO_EXT *mbmi_ext,
                           vpx_writer *w) {
   uint8_t ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
-  if (mbmi_ext->ref_mv_count[ref_frame_type] > 2) {
+
+  assert(mbmi->ref_mv_idx < 3);
+
+  if (mbmi_ext->ref_mv_count[ref_frame_type] > 1 && mbmi->mode == NEWMV) {
+    uint8_t drl_ctx =
+        vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 0);
+    vpx_prob drl_prob = cm->fc->drl_prob0[drl_ctx];
+
+    vpx_write(w, mbmi->ref_mv_idx != 0, drl_prob);
+    if (mbmi->ref_mv_idx == 0)
+      return;
+
+    if (mbmi_ext->ref_mv_count[ref_frame_type] > 2) {
+      drl_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 1);
+      drl_prob = cm->fc->drl_prob0[drl_ctx];
+      vpx_write(w, mbmi->ref_mv_idx != 1, drl_prob);
+    }
+    if (mbmi->ref_mv_idx == 1)
+      return;
+
+    assert(mbmi->ref_mv_idx == 2);
+    return;
+  }
+
+  if (mbmi_ext->ref_mv_count[ref_frame_type] > 2 && mbmi->mode == NEARMV) {
     uint8_t drl0_ctx =
         vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 1);
     vpx_prob drl0_prob = cm->fc->drl_prob0[drl0_ctx];
@@ -1088,7 +1112,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
                             mode_ctx);
 #if CONFIG_REF_MV
-      if (mode == NEARMV)
+      if (mode == NEARMV || mode == NEWMV)
         write_drl_idx(cm, mbmi, mbmi_ext, w);
 #endif
     }
@@ -1175,13 +1199,15 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
 #else
     if (mode == NEWMV) {
 #endif // CONFIG_EXT_INTER
+      int_mv ref_mv;
       for (ref = 0; ref < 1 + is_compound; ++ref) {
 #if CONFIG_REF_MV
         int nmv_ctx =
             vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
                          mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
         const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
 #endif
+        ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
 #if CONFIG_EXT_INTER
         if (mode == NEWFROMNEARMV)
           vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
@@ -1190,8 +1216,8 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
         else
 #endif // CONFIG_EXT_INTER
           vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
-                         &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
+                         &ref_mv.as_mv, nmvc,
                          allow_hp);
       }
 #if CONFIG_EXT_INTER
     } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
......
@@ -1085,6 +1085,10 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
   const int mi_height = num_8x8_blocks_high_lookup[bsize];
   int max_plane;
+#if CONFIG_REF_MV
+  int8_t rf_type;
+#endif
+
 #if !CONFIG_SUPERTX
   assert(mi->mbmi.sb_type == bsize);
 #endif
@@ -1092,6 +1096,23 @@ static void update_state(VP10_COMP *cpi, ThreadData *td,
   *mi_addr = *mi;
   *x->mbmi_ext = ctx->mbmi_ext;
 
+#if CONFIG_REF_MV
+  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  if (x->mbmi_ext->ref_mv_count[rf_type] > 1 &&
+      mbmi->sb_type >= BLOCK_8X8 &&
+      mbmi->mode == NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+      int_mv this_mv = (i == 0) ?
+          x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].this_mv :
+          x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].comp_mv;
+      clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+      lower_mv_precision(&this_mv.as_mv, cm->allow_high_precision_mv);
+      x->mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0] = this_mv;
+      mbmi->pred_mv[i] = this_mv;
+    }
+  }
+#endif
+
   // If segmentation in use
   if (seg->enabled) {
     // For in frame complexity AQ copy the segment id from the segment map.
@@ -1231,11 +1252,32 @@ static void update_state_supertx(VP10_COMP *cpi, ThreadData *td,
       cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
+#if CONFIG_REF_MV
+  int8_t rf_type;
+#endif
 
   *mi_addr = *mi;
   *x->mbmi_ext = ctx->mbmi_ext;
   assert(is_inter_block(mbmi));
   assert(mbmi->tx_size == ctx->mic.mbmi.tx_size);
 
+#if CONFIG_REF_MV
+  rf_type = vp10_ref_frame_type(mbmi->ref_frame);
+  if (x->mbmi_ext->ref_mv_count[rf_type] > 1 &&
+      mbmi->sb_type >= BLOCK_8X8 &&
+      mbmi->mode == NEWMV) {
+    for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+      int_mv this_mv = (i == 0) ?
+          x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].this_mv :
+          x->mbmi_ext->ref_mv_stack[rf_type][mbmi->ref_mv_idx].comp_mv;
+      clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+      lower_mv_precision(&this_mv.as_mv, cm->allow_high_precision_mv);
+      x->mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0] = this_mv;
+      mbmi->pred_mv[i] = this_mv;
+    }
+  }
+#endif
+
   // If segmentation in use
   if (seg->enabled && output_enabled) {
     // For in frame complexity AQ copy the segment id from the segment map.
......
@@ -8015,6 +8015,13 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
       rate2 += intra_cost_penalty;
       distortion2 = distortion_y + distortion_uv;
     } else {
+#if CONFIG_REF_MV
+      int_mv backup_ref_mv[2];
+
+      backup_ref_mv[0] = mbmi_ext->ref_mvs[ref_frame][0];
+      if (comp_pred)
+        backup_ref_mv[1] = mbmi_ext->ref_mvs[second_ref_frame][0];
+#endif
 #if CONFIG_EXT_INTER
       if (second_ref_frame == INTRA_FRAME) {
         mbmi->interintra_mode = best_intra_mode;
@@ -8033,6 +8040,19 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 #if CONFIG_REF_MV
       mbmi->ref_mv_idx = 0;
       ref_frame_type = vp10_ref_frame_type(mbmi->ref_frame);
+
+      if (this_mode == NEWMV &&
+          mbmi_ext->ref_mv_count[ref_frame_type] > 1) {
+        int ref;
+        for (ref = 0; ref < 1 + comp_pred; ++ref) {
+          int_mv this_mv = (ref == 0) ?
+              mbmi_ext->ref_mv_stack[ref_frame_type][0].this_mv :
+              mbmi_ext->ref_mv_stack[ref_frame_type][0].comp_mv;
+          clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+          lower_mv_precision(&this_mv.as_mv, cm->allow_high_precision_mv);
+          mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
+        }
+      }
 #endif
       this_rd = handle_inter_mode(cpi, x, bsize,
                                   &rate2, &distortion2, &skippable,
@@ -8058,20 +8078,38 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 #if CONFIG_REF_MV
       // TODO(jingning): This needs some refactoring to improve code quality
       // and reduce redundant steps.
-      if (mbmi->mode == NEARMV &&
-          mbmi_ext->ref_mv_count[ref_frame_type] > 2) {
+      if ((mbmi->mode == NEARMV &&
+           mbmi_ext->ref_mv_count[ref_frame_type] > 2) ||
+          (mbmi->mode == NEWMV &&
+           mbmi_ext->ref_mv_count[ref_frame_type] > 1)) {
         int_mv backup_mv = frame_mv[NEARMV][ref_frame];
-        int_mv cur_mv = mbmi_ext->ref_mv_stack[ref_frame][2].this_mv;
         MB_MODE_INFO backup_mbmi = *mbmi;
         int backup_skip = x->skip;
         int64_t tmp_ref_rd = this_rd;
         int ref_idx;
-        int ref_set = VPXMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 2);
-        uint8_t drl0_ctx =
-            vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 1);
-        rate2 += cpi->drl_mode_cost0[drl0_ctx][0];
+
+        // TODO(jingning): This should be deprecated shortly.
+        int idx_offset = (mbmi->mode == NEARMV) ? 1 : 0;
+        int ref_set =
+            VPXMIN(2, mbmi_ext->ref_mv_count[ref_frame_type] - 1 - idx_offset);
+        uint8_t drl0_ctx = 0;
+        uint8_t drl_ctx = 0;
+
+        // Dummy
+        int_mv backup_fmv[2];
+        backup_fmv[0] = frame_mv[NEWMV][ref_frame];
+        if (comp_pred)
+          backup_fmv[1] = frame_mv[NEWMV][second_ref_frame];
+
+        if (mbmi->mode == NEARMV) {
+          drl0_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 1);
+          rate2 += cpi->drl_mode_cost0[drl0_ctx][0];
+        } else {
+          drl_ctx = vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 0);
+          rate2 += cpi->drl_mode_cost0[drl_ctx][0];
+        }
 
         if (this_rd < INT64_MAX) {
           if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
@@ -8097,8 +8135,24 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
           int tmp_skip = 1;
           int64_t tmp_dist = 0, tmp_sse = 0;
           int dummy_disable_skip = 0;
+          int ref;
+          int_mv cur_mv;
+
+          mbmi->ref_mv_idx = 1 + ref_idx;
+
+          for (ref = 0; ref < 1 + comp_pred; ++ref) {
+            int_mv this_mv = (ref == 0) ?
+                mbmi_ext->ref_mv_stack[ref_frame_type]
+                                      [mbmi->ref_mv_idx].this_mv :
+                mbmi_ext->ref_mv_stack[ref_frame_type]
+                                      [mbmi->ref_mv_idx].comp_mv;
+            clamp_mv_ref(&this_mv.as_mv, xd->n8_w << 3, xd->n8_h << 3, xd);
+            lower_mv_precision(&this_mv.as_mv, cm->allow_high_precision_mv);
+            mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0] = this_mv;
+          }
 
-          cur_mv = mbmi_ext->ref_mv_stack[ref_frame][2 + ref_idx].this_mv;
+          cur_mv = mbmi_ext->ref_mv_stack[ref_frame]
+                                         [mbmi->ref_mv_idx + idx_offset].this_mv;
           lower_mv_precision(&cur_mv.as_mv, cm->allow_high_precision_mv);
           clamp_mv2(&cur_mv.as_mv, xd);
@@ -8117,7 +8171,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 #else
           int_mv dummy_single_newmv[MAX_REF_FRAMES] = { { 0 } };
 #endif
-          mbmi->ref_mv_idx = 1 + ref_idx;
           frame_mv[NEARMV][ref_frame] = cur_mv;
           tmp_alt_rd = handle_inter_mode(cpi, x, bsize,
@@ -8142,12 +8195,23 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
                                            &tmp_sse, best_rd);
           }
 
-          tmp_rate += cpi->drl_mode_cost0[drl0_ctx][1];
-          if (mbmi_ext->ref_mv_count[ref_frame_type] > 3) {
-            uint8_t drl1_ctx =
-                vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 2);
-            tmp_rate += cpi->drl_mode_cost1[drl1_ctx][ref_idx];
+          if (this_mode == NEARMV) {
+            tmp_rate += cpi->drl_mode_cost0[drl0_ctx][1];
+            if (mbmi_ext->ref_mv_count[ref_frame_type] > 3) {
+              uint8_t drl1_ctx =
+                  vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 2);
+              tmp_rate += cpi->drl_mode_cost1[drl1_ctx][ref_idx];
+            }
+          }
+
+          if (this_mode == NEWMV) {
+            tmp_rate += cpi->drl_mode_cost0[drl_ctx][1];
+            if (mbmi_ext->ref_mv_count[ref_frame_type] > 2) {
+              uint8_t this_drl_ctx =
+                  vp10_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], 1);
+              tmp_rate += cpi->drl_mode_cost0[this_drl_ctx][ref_idx];
+            }
           }
 
           if (tmp_alt_rd < INT64_MAX) {
@@ -8192,12 +8256,18 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         }
 
         frame_mv[NEARMV][ref_frame] = backup_mv;
+        frame_mv[NEWMV][ref_frame] = backup_fmv[0];
+        if (comp_pred)
+          frame_mv[NEWMV][second_ref_frame] = backup_fmv[1];
 #if CONFIG_VAR_TX
         for (i = 0; i < MAX_MB_PLANE; ++i)
           memcpy(x->blk_skip[i], x->blk_skip_drl[i],
                  sizeof(uint8_t) * ctx->num_4x4_blk);
 #endif
       }
+      mbmi_ext->ref_mvs[ref_frame][0] = backup_ref_mv[0];
+      if (comp_pred)
+        mbmi_ext->ref_mvs[second_ref_frame][0] = backup_ref_mv[1];
 #endif // CONFIG_REF_MV
 
       if (this_rd == INT64_MAX)
@@ -8506,17 +8576,21 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 #if CONFIG_REF_MV
   const uint8_t rf_type = vp10_ref_frame_type(best_mbmode.ref_frame);
   if (!comp_pred_mode) {
-    if (best_mbmode.ref_mv_idx > 0 && refs[1] == NONE) {
-      int idx = best_mbmode.ref_mv_idx + 1;
-      int_mv cur_mv = mbmi_ext->ref_mv_stack[refs[0]][idx].this_mv;
+    int i;
+    int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2) ?
+        VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2) : INT_MAX;
+
+    for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
+      int_mv cur_mv = mbmi_ext->ref_mv_stack[rf_type][i + 1].this_mv;
       lower_mv_precision(&cur_mv.as_mv, cm->allow_high_precision_mv);
-      frame_mv[NEARMV][refs[0]] = cur_mv;
+      if (cur_mv.as_int == best_mbmode.mv[0].as_int) {
+        best_mbmode.mode = NEARMV;
+        best_mbmode.ref_mv_idx = i;
+      }
     }
 
     if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int)
       best_mbmode.mode = NEARESTMV;
-    else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int)
-      best_mbmode.mode = NEARMV;
     else if (best_mbmode.mv[0].as_int == 0)
       best_mbmode.mode = ZEROMV;
   } else {
@@ -8524,21 +8598,37 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
     const int allow_hp = cm->allow_high_precision_mv;
     int_mv nearestmv[2] = { frame_mv[NEARESTMV][refs[0]],
                             frame_mv[NEARESTMV][refs[1]] };
     int_mv nearmv[2] = { frame_mv[NEARMV][refs[0]],
                          frame_mv[NEARMV][refs[1]] };
+#if CONFIG_EXT_INTER
+    if (mbmi_ext->ref_mv_count[rf_type] > 1) {
+      nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][1].this_mv;
+      nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][1].comp_mv;
+    }
+#else
+    int ref_set = (mbmi_ext->ref_mv_count[rf_type] >= 2) ?
+        VPXMIN(2, mbmi_ext->ref_mv_count[rf_type] - 2) : INT_MAX;
+
+    for (i = 0; i <= ref_set && ref_set != INT_MAX; ++i) {
+      nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][i + 1].this_mv;
+      nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][i + 1].comp_mv;
+      lower_mv_precision(&nearmv[0].as_mv, allow_hp);
+      lower_mv_precision(&nearmv[1].as_mv, allow_hp);
+
+      if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
+          nearmv[1].as_int == best_mbmode.mv[1].as_int) {
+        best_mbmode.mode = NEARMV;
+        best_mbmode.ref_mv_idx = i;
+      }
+    }
+#endif
+
     if (mbmi_ext->ref_mv_count[rf_type] >= 1) {
       nearestmv[0] = mbmi_ext->ref_mv_stack[rf_type][0].this_mv;
       nearestmv[1] = mbmi_ext->ref_mv_stack[rf_type][0].comp_mv;
     }
 
-    if (mbmi_ext->ref_mv_count[rf_type] > 1) {
-      int ref_mv_idx = best_mbmode.ref_mv_idx + 1;
-      nearmv[0] = mbmi_ext->ref_mv_stack[rf_type][ref_mv_idx].this_mv;
-      nearmv[1] = mbmi_ext->ref_mv_stack[rf_type][ref_mv_idx].comp_mv;
-    }
-
     for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
       lower_mv_precision(&nearestmv[i].as_mv, allow_hp);
       lower_mv_precision(&nearmv[i].as_mv, allow_hp);
@@ -8558,9 +8648,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         best_mbmode.mode = ZERO_ZEROMV;
 #else
       best_mbmode.mode = NEARESTMV;
-    else if (nearmv[0].as_int == best_mbmode.mv[0].as_int &&
-             nearmv[1].as_int == best_mbmode.mv[1].as_int)
-      best_mbmode.mode = NEARMV;
     else if (best_mbmode.mv[0].as_int == 0 && best_mbmode.mv[1].as_int == 0)
       best_mbmode.mode = ZEROMV;
 #endif // CONFIG_EXT_INTER
......
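
The encoder-side hunks above amount to: for a NEWMV candidate with more than one entry in the reference-MV stack, re-evaluate the block once per extra stack index, charge the rate of the drl flags that would signal that index, and keep whichever index gives the lowest rate-distortion cost, restoring the backed-up reference MVs afterwards. A schematic of that selection, again with invented toy types and a toy cost model rather than the real handle_inter_mode() path:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int row, col; } toy_mv;

/* Toy RD model: squared distance to the block's true motion as distortion,
 * residual magnitude plus the drl flag count as rate. */
static int64_t toy_rd_cost(toy_mv pred, toy_mv actual, int drl_flag_bits) {
  const int dr = actual.row - pred.row;
  const int dc = actual.col - pred.col;
  const int64_t dist = (int64_t)dr * dr + (int64_t)dc * dc;
  const int64_t rate = abs(dr) + abs(dc) + drl_flag_bits;
  const int64_t lambda = 4;  /* arbitrary rate multiplier */
  return dist + lambda * rate;
}

int main(void) {
  const toy_mv stack[3] = { { 12, 3 }, { 16, 0 }, { 0, 0 } };  /* ranked candidates */
  const toy_mv actual = { 17, 1 };     /* motion the block really has */
  const int ref_mv_count = 3;
  int idx, best_idx = 0;
  int64_t best_cost = INT64_MAX;

  for (idx = 0; idx < ref_mv_count && idx < 3; ++idx) {
    /* Flags needed to signal this index: none with a single candidate,
     * one for index 0, up to two for the later indices. */
    const int flag_bits =
        (ref_mv_count <= 1) ? 0 : (idx == 0) ? 1 : (ref_mv_count > 2) ? 2 : 1;
    const int64_t cost = toy_rd_cost(stack[idx], actual, flag_bits);
    printf("ref_mv_idx %d: cost %lld\n", idx, (long long)cost);
    if (cost < best_cost) {
      best_cost = cost;
      best_idx = idx;
    }
  }
  printf("selected ref_mv_idx = %d\n", best_idx);
  return 0;
}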