Commit ec2ffda5 authored by Jingning Han

Handle zero motion vector residual

This commit handles zero motion vector residuals separately for the single
and compound reference modes. It improves coding performance by 0.13% with
no additional encoding complexity.

Change-Id: I16075a836025bd2746da2ff4698fb9261e4b08c1
parent c5449d3e
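For context, the idea of the patch is that a compound-reference NEWMV block first codes one binary symbol (the new zero_rmv probability) saying whether the motion vector residual is exactly zero, and only codes the usual MV joint type when it is not; single-reference blocks never code a zero residual, so MV_JOINT_ZERO is made nearly impossible in their default joint distribution. The snippet below is a minimal illustrative sketch of that decision logic only, not code from the patch: printf() stands in for the arithmetic coder (vpx_write / vp10_write_token), and the function name sketch_encode_mv_joint and the demo calls in main() are invented for illustration.

```c
/* Illustrative sketch only; printf() stands in for the arithmetic coder. */
#include <stdio.h>

typedef enum {
  MV_JOINT_ZERO = 0,   /* both residual components are zero        */
  MV_JOINT_HNZVZ = 1,  /* horizontal non-zero, vertical zero       */
  MV_JOINT_HZVNZ = 2,  /* horizontal zero, vertical non-zero       */
  MV_JOINT_HNZVNZ = 3  /* both components non-zero                 */
} MV_JOINT_TYPE;

static void sketch_encode_mv_joint(int is_compound, MV_JOINT_TYPE j) {
  if (is_compound) {
    /* One bit, coded with the new zero_rmv probability, tells the decoder
     * whether the residual is exactly (0, 0). */
    printf("zero_rmv bit: %d\n", j == MV_JOINT_ZERO);
    if (j == MV_JOINT_ZERO) return;  /* nothing more to code */
  }
  /* Single-reference NEWMV never codes a zero residual, so MV_JOINT_ZERO
   * gets a near-zero probability (1/256) in the new default joint table. */
  printf("joint type: %d\n", (int)j);
  /* ...sign and magnitude of each non-zero component follow as before... */
}

int main(void) {
  sketch_encode_mv_joint(1, MV_JOINT_ZERO);    /* compound, zero residual */
  sketch_encode_mv_joint(1, MV_JOINT_HNZVNZ);  /* compound, non-zero      */
  sketch_encode_mv_joint(0, MV_JOINT_HNZVZ);   /* single reference        */
  return 0;
}
```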
@@ -44,7 +44,12 @@ const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = {
};
static const nmv_context default_nmv_context = {
#if CONFIG_REF_MV
{1, 64, 96},
128,
#else
{32, 64, 96},
#endif
{
{ // Vertical component
128, // sign
@@ -169,6 +174,12 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts,
void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
if (counts != NULL) {
const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
#if CONFIG_REF_MV
++counts->zero_rmv[j == MV_JOINT_ZERO];
if (j == MV_JOINT_ZERO)
return;
#endif
++counts->joints[j];
if (mv_joint_vertical(j)) {
@@ -195,6 +206,9 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
fc->joints);
#if CONFIG_REF_MV
fc->zero_rmv = mode_mv_merge_probs(pre_fc->zero_rmv, counts->zero_rmv);
#endif
for (i = 0; i < 2; ++i) {
nmv_component *comp = &fc->comps[i];
@@ -95,6 +95,9 @@ typedef struct {
typedef struct {
vpx_prob joints[MV_JOINTS - 1];
#if CONFIG_REF_MV
vpx_prob zero_rmv;
#endif
nmv_component comps[2];
} nmv_context;
@@ -121,6 +124,9 @@ typedef struct {
typedef struct {
unsigned int joints[MV_JOINTS];
#if CONFIG_REF_MV
unsigned int zero_rmv[2];
#endif
nmv_component_counts comps[2];
} nmv_context_counts;
@@ -189,6 +189,10 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, vp10_reader *r) {
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_REF_MV
vp10_diff_update_prob(r, &ctx->zero_rmv);
#endif
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
update_mv_probs(&comp_ctx->sign, 1, r);
@@ -685,13 +685,37 @@ static int read_mv_component(vp10_reader *r,
}
static INLINE void read_mv(vp10_reader *r, MV *mv, const MV *ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context *ctx,
nmv_context_counts *counts, int allow_hp) {
const MV_JOINT_TYPE joint_type =
(MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree, ctx->joints);
MV_JOINT_TYPE joint_type;
const int use_hp = allow_hp && vp10_use_mv_hp(ref);
MV diff = {0, 0};
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
int is_zero_rmv = vpx_read(r, ctx->zero_rmv);
if (is_zero_rmv) {
joint_type = MV_JOINT_ZERO;
} else {
joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
ctx->joints);
}
} else {
joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
ctx->joints);
}
#else
joint_type = (MV_JOINT_TYPE)vp10_read_tree(r, vp10_mv_joint_tree,
ctx->joints);
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
(void)is_compound;
#endif
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0], use_hp);
@@ -964,8 +988,11 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc[nmv_ctx],
mv_counts, allow_hp);
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
&cm->fc->nmvc[nmv_ctx], mv_counts, allow_hp);
#else
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
@@ -1027,7 +1054,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, is_compound,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
@@ -1063,7 +1090,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
@@ -1084,7 +1111,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = nearest_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, is_compound,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
@@ -1105,7 +1132,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = near_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, is_compound,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
@@ -1126,7 +1153,7 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, is_compound,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
@@ -1192,9 +1192,13 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
#if CONFIG_EXT_INTER
&mi->bmi[j].ref_mv[ref].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
#else
#if CONFIG_REF_MV
&mi->bmi[j].pred_mv_s8[ref].as_mv,
is_compound,
#else
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
#endif // CONFIG_REF_MV
@@ -1211,7 +1215,11 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
&mi->bmi[j].ref_mv[1].as_mv, nmvc, allow_hp);
&mi->bmi[j].ref_mv[1].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
} else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
@@ -1220,7 +1228,11 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
&mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
&mi->bmi[j].ref_mv[0].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
}
#endif // CONFIG_EXT_INTER
}
@@ -1244,12 +1256,18 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
if (mode == NEWFROMNEARMV)
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
else
#endif // CONFIG_EXT_INTER
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&ref_mv.as_mv, nmvc,
allow_hp);
&ref_mv.as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
}
#if CONFIG_EXT_INTER
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
@@ -1260,8 +1278,11 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv, nmvc,
allow_hp);
&mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
@@ -1270,8 +1291,11 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv, nmvc,
allow_hp);
&mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
#if CONFIG_REF_MV
is_compound,
#endif
nmvc, allow_hp);
#endif // CONFIG_EXT_INTER
}
}
@@ -118,6 +118,8 @@ struct macroblock {
int *nmvcost_hp[NMV_CONTEXTS][2];
int **mv_cost_stack[NMV_CONTEXTS];
int *nmvjointsadcost;
int zero_rmv_cost[NMV_CONTEXTS][2];
int comp_rmv_cost[2];
#else
int nmvjointcost[MV_JOINTS];
int *nmvcost[2];
@@ -167,6 +167,8 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints,
MV_JOINTS, w);
vp10_cond_prob_diff_update(w, &mvc->zero_rmv, counts->zero_rmv);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
nmv_component_counts *comp_counts = &counts->comps[i];
@@ -237,12 +239,30 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
void vp10_encode_mv(VP10_COMP* cpi, vp10_writer* w,
const MV* mv, const MV* ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context* mvctx, int usehp) {
const MV diff = {mv->row - ref->row,
mv->col - ref->col};
const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
usehp = usehp && vp10_use_mv_hp(ref);
#if CONFIG_REF_MV && !CONFIG_EXT_INTER
if (is_compound) {
vpx_write(w, (j == MV_JOINT_ZERO), mvctx->zero_rmv);
if (j == MV_JOINT_ZERO)
return;
} else {
if (j == MV_JOINT_ZERO)
assert(0);
}
#endif
#if CONFIG_REF_MV && CONFIG_EXT_INTER
(void)is_compound;
#endif
vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]);
if (mv_joint_vertical(j))
encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
@@ -24,7 +24,10 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vp10_writer *w,
nmv_context_counts *const counts);
void vp10_encode_mv(VP10_COMP *cpi, vp10_writer* w, const MV* mv, const MV* ref,
const nmv_context* mvctx, int usehp);
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context* mvctx, int usehp);
void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
const nmv_context* mvctx, int usehp);
@@ -349,6 +349,9 @@ void vp10_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
x->nmvjointcost = x->nmv_vec_cost[nmv_ctx];
x->mvsadcost = x->mvcost;
x->nmvjointsadcost = x->nmvjointcost;
x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] =
x->zero_rmv_cost[nmv_ctx][1] - x->zero_rmv_cost[nmv_ctx][0];
}
#endif
@@ -373,12 +376,23 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
if (!frame_is_intra_only(cm)) {
#if CONFIG_REF_MV
int nmv_ctx;
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
vpx_prob tmp_prob = cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO];
cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = 1;
vp10_build_nmv_cost_table(
x->nmv_vec_cost[nmv_ctx],
cm->allow_high_precision_mv ? x->nmvcost_hp[nmv_ctx]
: x->nmvcost[nmv_ctx],
&cm->fc->nmvc[nmv_ctx], cm->allow_high_precision_mv);
cm->fc->nmvc[nmv_ctx].joints[MV_JOINT_ZERO] = tmp_prob;
x->nmv_vec_cost[nmv_ctx][MV_JOINT_ZERO] = 0;
x->zero_rmv_cost[nmv_ctx][0] =
vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 0);
x->zero_rmv_cost[nmv_ctx][1] =
vp10_cost_bit(cm->fc->nmvc[nmv_ctx].zero_rmv, 1);
}
x->mvcost = x->mv_cost_stack[0];
x->nmvjointcost = x->nmv_vec_cost[0];
@@ -4135,15 +4135,30 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
!vp10_use_mv_hp(&best_ref_mv[0]->as_mv))
lower_mv_precision(&this_mv[0].as_mv, 0);
#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
for (idx = 0; idx < 1 + is_compound; ++idx) {
this_mv[idx] = seg_mvs[mbmi->ref_frame[idx]];
vp10_set_mvcost(x, mbmi->ref_frame[idx]);
thismvcost += vp10_mv_bit_cost(&this_mv[idx].as_mv,
&best_ref_mv[idx]->as_mv,
x->nmvjointcost, x->mvcost,
MV_COST_WEIGHT_SUB);
}
(void)mvjcost;
(void)mvcost;
#else
thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
#if !CONFIG_EXT_INTER
if (is_compound) {
this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv,
&best_ref_mv[1]->as_mv,
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
}
#endif // !CONFIG_EXT_INTER
#endif
break;
case NEARMV:
case NEARESTMV:
@@ -4755,14 +4770,22 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < MAX_MB_PLANE; i++)
xd->plane[i].pre[ref] = backup_yv12[ref][i];
}
#if CONFIG_EXT_INTER
if (bsize >= BLOCK_8X8)
#endif // CONFIG_EXT_INTER
#if CONFIG_REF_MV
vp10_set_mvcost(x, refs[ref]);
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
#else
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
#endif
#if CONFIG_EXT_INTER
if (bsize >= BLOCK_8X8)
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
else
*rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
&ref_mv_sub8x8[ref]->as_mv,