Commit df59bb89 authored by Jingning Han

Vectorize motion vector probability models

This commit converts the scalar motion vector probability model
into vector format, enabling more precise estimation in later stages.

Change-Id: I7008d047ecc1b9577aa8442b4db2df312be869dc
parent 876c8b03
......@@ -93,7 +93,11 @@ typedef struct frame_contexts {
vpx_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
#endif
vpx_prob skip_probs[SKIP_CONTEXTS];
#if CONFIG_REF_MV
nmv_context nmvc[NMV_CONTEXTS];
#else
nmv_context nmvc;
#endif
int initialized;
#if CONFIG_EXT_TX
vpx_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
......@@ -150,7 +154,11 @@ typedef struct FRAME_COUNTS {
unsigned int txfm_partition[TXFM_PARTITION_CONTEXTS][2];
#endif
unsigned int skip[SKIP_CONTEXTS][2];
#if CONFIG_REF_MV
nmv_context_counts mv[NMV_CONTEXTS];
#else
nmv_context_counts mv;
#endif
#if CONFIG_EXT_TX
unsigned int inter_ext_tx[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES];
unsigned int intra_ext_tx[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
......
......@@ -185,7 +185,45 @@ void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
int i, j;
#if CONFIG_REF_MV
int idx;
for (idx = 0; idx < NMV_CONTEXTS; ++idx) {
nmv_context *fc = &cm->fc->nmvc[idx];
const nmv_context *pre_fc =
&cm->frame_contexts[cm->frame_context_idx].nmvc[idx];
const nmv_context_counts *counts = &cm->counts.mv[idx];
vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
fc->joints);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &fc->comps[i];
const nmv_component *pre_comp = &pre_fc->comps[i];
const nmv_component_counts *c = &counts->comps[i];
comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign);
vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
comp->classes);
vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
comp->class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
for (j = 0; j < CLASS0_SIZE; ++j)
vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
c->class0_fp[j], comp->class0_fp[j]);
vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
if (allow_hp) {
comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp,
c->class0_hp);
comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp);
}
}
}
#else
nmv_context *fc = &cm->fc->nmvc;
const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
const nmv_context_counts *counts = &cm->counts.mv;
......@@ -218,8 +256,15 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp);
}
}
#endif
}
/* Reset the frame context's motion vector probability model(s) to the
 * built-in defaults. With CONFIG_REF_MV there is one independent model
 * per MV context; otherwise a single shared model is used. */
void vp10_init_mv_probs(VP10_COMMON *cm) {
#if CONFIG_REF_MV
  int ctx;
  for (ctx = 0; ctx < NMV_CONTEXTS; ++ctx)
    cm->fc->nmvc[ctx] = default_nmv_context;
#else
  cm->fc->nmvc = default_nmv_context;
#endif
}
......@@ -227,6 +227,8 @@ typedef enum {
#define SKIP_CONTEXTS 3
#if CONFIG_REF_MV
#define NMV_CONTEXTS 2
#define NEWMV_MODE_CONTEXTS 7
#define ZEROMV_MODE_CONTEXTS 2
#define REFMV_MODE_CONTEXTS 9
......
......@@ -228,6 +228,22 @@ static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
}
#if CONFIG_REF_MV
/* Select which of the NMV_CONTEXTS motion vector probability models to
 * use for the current block.
 *
 * Returns 1 when the top reference-MV candidate is a strong ("category
 * level") candidate whose stored MV is close (within 1 full pel, i.e.
 * 8 eighth-pel units, on both axes) to its predicted MV; returns 0
 * otherwise.
 *
 * ref_mv_count  - number of valid entries in ref_mv_stack.
 * ref_mv_stack  - candidate reference MV stack; entry 0 is only read
 *                 when ref_mv_count > 0.
 */
static INLINE int vp10_nmv_ctx(const uint8_t ref_mv_count,
                               const CANDIDATE_MV *ref_mv_stack) {
#if CONFIG_EXT_INTER
  /* EXT_INTER currently always uses context 0; silence unused-parameter
   * warnings since the selection logic below is compiled out. */
  (void)ref_mv_count;
  (void)ref_mv_stack;
  return 0;
#else
  /* Check the count before touching ref_mv_stack[0]: the entry may be
   * uninitialized when the stack is empty. */
  if (ref_mv_count > 0 &&
      ref_mv_stack[0].weight > REF_CAT_LEVEL) {
    if (abs(ref_mv_stack[0].this_mv.as_mv.row -
            ref_mv_stack[0].pred_mv.as_mv.row) < 8 &&
        abs(ref_mv_stack[0].this_mv.as_mv.col -
            ref_mv_stack[0].pred_mv.as_mv.col) < 8)
      return 1;
  }
  return 0;
#endif
}
static INLINE int8_t vp10_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
if (rf[1] > INTRA_FRAME)
return rf[0] + ALTREF_FRAME;
......
......@@ -447,6 +447,39 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
for (j = 0; j < 2; j++)
cm->counts.skip[i][j] += counts->skip[i][j];
#if CONFIG_REF_MV
for (m = 0; m < NMV_CONTEXTS; ++m) {
for (i = 0; i < MV_JOINTS; i++)
cm->counts.mv[m].joints[i] += counts->mv[m].joints[i];
for (k = 0; k < 2; k++) {
nmv_component_counts *comps = &cm->counts.mv[m].comps[k];
nmv_component_counts *comps_t = &counts->mv[m].comps[k];
for (i = 0; i < 2; i++) {
comps->sign[i] += comps_t->sign[i];
comps->class0_hp[i] += comps_t->class0_hp[i];
comps->hp[i] += comps_t->hp[i];
}
for (i = 0; i < MV_CLASSES; i++)
comps->classes[i] += comps_t->classes[i];
for (i = 0; i < CLASS0_SIZE; i++) {
comps->class0[i] += comps_t->class0[i];
for (j = 0; j < MV_FP_SIZE; j++)
comps->class0_fp[i][j] += comps_t->class0_fp[i][j];
}
for (i = 0; i < MV_OFFSET_BITS; i++)
for (j = 0; j < 2; j++)
comps->bits[i][j] += comps_t->bits[i][j];
for (i = 0; i < MV_FP_SIZE; i++)
comps->fp[i] += comps_t->fp[i];
}
}
#else
for (i = 0; i < MV_JOINTS; i++)
cm->counts.mv.joints[i] += counts->mv.joints[i];
......@@ -476,6 +509,7 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
for (i = 0; i < MV_FP_SIZE; i++)
comps->fp[i] += comps_t->fp[i];
}
#endif
#if CONFIG_EXT_TX
for (i = 0; i < EXT_TX_SIZES; i++) {
......
......@@ -3566,7 +3566,9 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
for (i = 0; i < INTRA_MODES - 1; ++i)
vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
#endif
read_inter_mode_probs(fc, &r);
......@@ -3593,7 +3595,12 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
for (i = 0; i < INTRA_MODES - 1; ++i)
vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r);
#else
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
#endif
read_ext_tx_probs(fc, &r);
#if CONFIG_SUPERTX
if (!xd->lossless[0])
......@@ -3647,7 +3654,14 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
sizeof(cm->counts.comp_ref)));
assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
#if CONFIG_REF_MV
assert(!memcmp(&cm->counts.mv[0], &zero_counts.mv[0],
sizeof(cm->counts.mv[0])));
assert(!memcmp(&cm->counts.mv[1], &zero_counts.mv[1],
sizeof(cm->counts.mv[0])));
#else
assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
#endif
assert(!memcmp(cm->counts.inter_ext_tx, zero_counts.inter_ext_tx,
sizeof(cm->counts.inter_ext_tx)));
assert(!memcmp(cm->counts.intra_ext_tx, zero_counts.intra_ext_tx,
......
......@@ -912,10 +912,21 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
#endif // CONFIG_EXT_INTER
case NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if !CONFIG_REF_MV
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
#endif
for (i = 0; i < 1 + is_compound; ++i) {
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc[nmv_ctx],
mv_counts, allow_hp);
#else
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
ret = ret && is_mv_valid(&mv[i].as_mv);
#if CONFIG_REF_MV
......@@ -963,11 +974,23 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_EXT_INTER
case NEW_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if !CONFIG_REF_MV
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
#endif
assert(is_compound);
for (i = 0; i < 2; ++i) {
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
xd->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
ret = ret && is_mv_valid(&mv[i].as_mv);
}
break;
......@@ -992,40 +1015,83 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
}
case NEW_NEARESTMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
assert(is_compound);
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
assert(is_compound);
ret = ret && is_mv_valid(&mv[0].as_mv);
mv[1].as_int = nearest_mv[1].as_int;
break;
}
case NEAREST_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = nearest_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
assert(is_compound);
mv[0].as_int = nearest_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
assert(is_compound);
ret = ret && is_mv_valid(&mv[1].as_mv);
break;
}
case NEAR_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
xd->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
mv[0].as_int = near_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
assert(is_compound);
mv[0].as_int = near_mv[0].as_int;
read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
assert(is_compound);
ret = ret && is_mv_valid(&mv[1].as_mv);
break;
}
case NEW_NEARMV: {
FRAME_COUNTS *counts = xd->counts;
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
xd->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *const mv_counts =
counts ? &counts->mv[nmv_ctx] : NULL;
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
&cm->fc->nmvc[nmv_ctx], mv_counts,
allow_hp);
#else
nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
assert(is_compound);
read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
allow_hp);
#endif
assert(is_compound);
ret = ret && is_mv_valid(&mv[0].as_mv);
mv[1].as_int = near_mv[1].as_int;
break;
......@@ -1342,6 +1408,10 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
}
}
#if CONFIG_REF_MV
mbmi->pred_mv[0].as_int = mi->bmi[3].pred_mv[0].as_int;
mbmi->pred_mv[1].as_int = mi->bmi[3].pred_mv[1].as_int;
#endif
mi->mbmi.mode = b_mode;
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
......
......@@ -882,7 +882,9 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
#endif
vpx_writer *w) {
VP10_COMMON *const cm = &cpi->common;
#if !CONFIG_REF_MV
const nmv_context *nmvc = &cm->fc->nmvc;
#endif
const MACROBLOCK *x = &cpi->td.mb;
const MACROBLOCKD *xd = &x->e_mbd;
const struct segmentation *const seg = &cm->seg;
......@@ -1070,20 +1072,39 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
#else
if (b_mode == NEWMV) {
#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref)
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
#if CONFIG_EXT_INTER
&mi->bmi[j].ref_mv[ref].as_mv,
#else
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
#endif // CONFIG_EXT_INTER
nmvc, allow_hp);
nmvc, allow_hp);
}
}
#if CONFIG_EXT_INTER
else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
&mi->bmi[j].ref_mv[1].as_mv, nmvc, allow_hp);
} else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
&mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
}
......@@ -1096,9 +1117,14 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
#else
if (mode == NEWMV) {
#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref)
for (ref = 0; ref < 1 + is_compound; ++ref) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
#if CONFIG_EXT_INTER
{
if (mode == NEWFROMNEARMV)
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
......@@ -1108,13 +1134,25 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
allow_hp);
#if CONFIG_EXT_INTER
}
#if CONFIG_EXT_INTER
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv, nmvc,
allow_hp);
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
#if CONFIG_REF_MV
int nmv_ctx =
vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv, nmvc,
allow_hp);
......@@ -2449,7 +2487,11 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
counts->y_mode[i], INTRA_MODES, &header_bc);
vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
&counts->mv);
#if CONFIG_REF_MV
counts->mv);
#else
&counts->mv);
#endif
update_ext_tx_probs(cm, &header_bc);
#if CONFIG_SUPERTX
if (!xd->lossless[0])
......
......@@ -157,9 +157,49 @@ static void write_mv_update(const vpx_tree_index *tree,
}
void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
nmv_context_counts *const counts) {
nmv_context_counts *const nmv_counts) {
int i, j;
#if CONFIG_REF_MV
int nmv_ctx = 0;
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints,
MV_JOINTS, w);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
nmv_component_counts *comp_counts = &counts->comps[i];
update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
MV_CLASSES, w);
write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
CLASS0_SIZE, w);
for (j = 0; j < MV_OFFSET_BITS; ++j)
update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
}
for (i = 0; i < 2; ++i) {
for (j = 0; j < CLASS0_SIZE; ++j)
write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
MV_FP_SIZE, w);
}
if (usehp) {
for (i = 0; i < 2; ++i) {
update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp,
MV_UPDATE_PROB);
update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
}
}
}
#else
nmv_context *const mvc = &cm->fc->nmvc;
nmv_context_counts *const counts = nmv_counts;
write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
......@@ -192,6 +232,7 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
}
}
#endif
}
void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w,
......@@ -227,27 +268,45 @@ void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
#if CONFIG_EXT_INTER
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
const int_mv mvs[2],
nmv_context_counts *counts) {
nmv_context_counts *nmv_counts) {
int i;
PREDICTION_MODE mode = mbmi->mode;
int mv_idx = (mode == NEWFROMNEARMV);
#if !CONFIG_REF_MV
nmv_context_counts *counts = nmv_counts;
#endif
if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][mv_idx].as_mv;
const MV diff = {mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
const MV diff = {mvs[1].as_mv.row - ref->row,
mvs[1].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
const MV diff = {mvs[0].as_mv.row - ref->row,
mvs[0].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
}
......@@ -255,36 +314,67 @@ static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
static void inc_mvs_sub8x8(const MODE_INFO *mi,
int block,
const int_mv mvs[2],
nmv_context_counts *counts) {
#if CONFIG_REF_MV
const MB_MODE_INFO_EXT *mbmi_ext,
#endif
nmv_context_counts *nmv_counts) {
int i;
PREDICTION_MODE mode = mi->bmi[block].as_mode;
#if CONFIG_REF_MV
const MB_MODE_INFO *mbmi = &mi->mbmi;
#else
nmv_context_counts *counts = nmv_counts;
#endif
if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
for (i = 0; i < 1 + has_second_ref(&mi->mbmi); ++i) {
const MV *ref = &mi->bmi[block].ref_mv[i].as_mv;
const MV diff = {mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
} else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
const MV diff = {mvs[1].as_mv.row - ref->row,
mvs[1].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
} else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
const MV diff = {mvs[0].as_mv.row - ref->row,
mvs[0].as_mv.col - ref->col};
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
}
}
#else
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
const int_mv mvs[2],
nmv_context_counts *counts) {
nmv_context_counts *nmv_counts) {
int i;
#if !CONFIG_REF_MV
nmv_context_counts *counts = nmv_counts;
#endif
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
#if CONFIG_REF_MV
int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
nmv_context_counts *counts = &nmv_counts[nmv_ctx];
#endif
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
const MV diff = {mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col};
......@@ -310,10 +400,21 @@ void vp10_update_mv_count(ThreadData *td) {
#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, &td->counts->mv);
inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv,
#if CONFIG_REF_MV
mbmi_ext,
td->counts->mv);
#else
&td->counts->mv);
#endif
#else
if (mi->bmi[i].as_mode == NEWMV)
inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv);
inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv,
#if CONFIG_REF_MV
td->counts->mv);