Commit ba30e774 authored by Paul Wilkins

Explicit MV reference experiment.

Coding and costing of the MV reference signal.

Issues in how the MV reference interacts with COMPANDED_MVREF_THRESH
remain to be resolved. Ideally the MV precision should be decided from
the absolute MV magnitude, not, as now, from the magnitude of the MV
reference.
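
For context, a minimal sketch of the gating this refers to (the helper
name and exact form are illustrative, not part of this patch):
high-precision 1/8-pel MV components are only used while the integer-pel
magnitude of the reference MV is below COMPANDED_MVREF_THRESH, which is
why the hunk below simply raises the threshold to a very large value
when CONFIG_NEW_MVREF is enabled.

    /* Illustrative sketch only: gate 1/8-pel precision on the magnitude
     * of the *reference* MV. Per the note above, this should ideally be
     * based on the absolute magnitude of the MV itself. */
    static int ref_allows_high_precision_mv(const MV *ref_mv) {
      /* MV components are stored in 1/8-pel units; >> 3 gives integer pels. */
      return (abs(ref_mv->row) >> 3) < COMPANDED_MVREF_THRESH &&
             (abs(ref_mv->col) >> 3) < COMPANDED_MVREF_THRESH;
    }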

The update of the MV counts has been moved into bitstream.c because
otherwise, if the motion reference is changed at the last minute, the
encoder and decoder get out of step in the counts used to update the
entropy probabilities.
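
Concretely, the counts are now accumulated by update_mvcount() in
bitstream.c, and only on the real packing pass, so the encoder counts
exactly the residuals the decoder will see. In outline (see the
bitstream.c hunks below):

    /* Count MV residuals against the final reference choice, but skip
     * the rate-estimation (dummy) packing passes. */
    if (!cpi->dummy_packing)
      update_mvcount(cpi, x, &best_mv, &best_second_mv);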

The code works on a few test clips, but there are no results yet on
benefit vs. signaling cost, and no tuning of the RD loop to test
lower-cost alternatives based on the available reference values.

Patch 3: Added a check to make sure we don't pick a reference that
would give rise to an uncodeable / out-of-range residual.
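
The check lives in pick_best_mv_ref() in the bitstream.c hunk below;
roughly, a candidate is skipped when the residual it would produce
exceeds the codeable range in either component:

    /* Skip reference candidates whose row or col residual would fall
     * outside the codeable MV range (max_mv is set from MV_MAX). */
    if (abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv ||
        abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)
      continue;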

Patches 6-7: Attempt to rebase. OK to submit, but best to leave the
flag off for now.

Patch 9: Remove a print that is no longer needed.

Change-Id: I1938c2ffe41afe6d3cf6ccc0cb2c5d404809a712
parent ce9f61ec
@@ -216,10 +216,8 @@ typedef struct {
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
TX_SIZE txfm_size;
int_mv mv[2]; // for each reference frame used
#if CONFIG_NEWBESTREFMV
int_mv ref_mv, second_ref_mv;
#if CONFIG_NEWBESTREFMV || CONFIG_NEW_MVREF
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
int mv_ref_index[MAX_REF_FRAMES];
#endif
SPLITMV_PARTITIONING_TYPE partitioning;
@@ -325,6 +323,9 @@ typedef struct MacroBlockD {
// Probability Tree used to code Segment number
vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS];
#if CONFIG_NEW_MVREF
vp8_prob mb_mv_ref_id_probs[MAX_REF_FRAMES][3];
#endif
// Segment features
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
@@ -379,11 +380,6 @@ typedef struct MacroBlockD {
#endif
int mb_index; // Index of the MB in the SB (0..3)
#if CONFIG_NEWBESTREFMV
int_mv ref_mv[MAX_MV_REFS];
#endif
int q_index;
} MACROBLOCKD;
......
@@ -17,8 +17,13 @@
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
#if CONFIG_NEW_MVREF
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 1000000
#else
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
#endif
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
......
@@ -54,19 +54,9 @@ void vp8_find_near_mvs
int *cntx = cnt;
enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
#if CONFIG_NEWBESTREFMV
int_mv *ref_mv = xd->ref_mv;
#endif
/* Zero accumulators */
mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
#if CONFIG_NEWBESTREFMV
ref_mv[0].as_int = ref_mv[1].as_int
= ref_mv[2].as_int
= ref_mv[3].as_int
= 0;
#endif
/* Process above */
if (above->mbmi.ref_frame != INTRA_FRAME) {
@@ -75,9 +65,6 @@ void vp8_find_near_mvs
mv->as_int = above->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame],
refframe, mv, ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
ref_mv[0].as_int = mv->as_int;
#endif
++cntx;
}
*cntx += 2;
@@ -90,9 +77,7 @@ void vp8_find_near_mvs
this_mv.as_int = left->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame],
refframe, &this_mv, ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
ref_mv[1].as_int = this_mv.as_int;
#endif
if (this_mv.as_int != mv->as_int) {
++ mv;
mv->as_int = this_mv.as_int;
@@ -107,21 +92,9 @@ void vp8_find_near_mvs
(lf_here->mbmi.ref_frame == LAST_FRAME && refframe == LAST_FRAME)) {
if (aboveleft->mbmi.mv[0].as_int) {
third = aboveleft;
#if CONFIG_NEWBESTREFMV
ref_mv[2].as_int = aboveleft->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame],
refframe, (ref_mv+2), ref_frame_sign_bias);
#endif
} else if (lf_here->mbmi.mv[0].as_int) {
third = lf_here;
}
#if CONFIG_NEWBESTREFMV
if (lf_here->mbmi.mv[0].as_int) {
ref_mv[3].as_int = lf_here->mbmi.mv[0].as_int;
mv_bias(ref_frame_sign_bias[lf_here->mbmi.ref_frame],
refframe, (ref_mv+3), ref_frame_sign_bias);
}
#endif
if (third) {
int_mv this_mv;
this_mv.as_int = third->mbmi.mv[0].as_int;
@@ -294,6 +267,12 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
}
}
// Make sure all the candidates are properly clamped etc
for (i = 0; i < 4; ++i) {
lower_mv_precision(&sorted_mvs[i], xd->allow_high_precision_mv);
vp8_clamp_mv2(&sorted_mvs[i], xd);
}
// Set the best mv to the first entry in the sorted list
best_mv->as_int = sorted_mvs[0].as_int;
@@ -316,9 +295,6 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
// Copy back the re-ordered mv list
vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
lower_mv_precision(best_mv, xd->allow_high_precision_mv);
vp8_clamp_mv2(best_mv, xd);
}
#endif // CONFIG_NEWBESTREFMV
@@ -72,6 +72,23 @@ static void vp8_read_mb_segid(vp8_reader *r, MB_MODE_INFO *mi,
}
}
#if CONFIG_NEW_MVREF
int vp8_read_mv_ref_id(vp8_reader *r,
vp8_prob * ref_id_probs) {
int ref_index = 0;
if (vp8_read(r, ref_id_probs[0])) {
ref_index++;
if (vp8_read(r, ref_id_probs[1])) {
ref_index++;
if (vp8_read(r, ref_id_probs[2]))
ref_index++;
}
}
return ref_index;
}
#endif
extern const int vp8_i8x8_block[4];
static void kfread_modes(VP8D_COMP *pbi,
MODE_INFO *m,
@@ -530,6 +547,12 @@ static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
#if CONFIG_NEW_MVREF
// Temporary default probabilities for encoding the MV ref id signal
vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
#endif
read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
}
}
@@ -708,13 +731,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
// Update stats on relative distance of chosen vector to the
// possible best reference vectors.
{
find_mv_refs(xd, mi, prev_mi,
ref_frame, mbmi->ref_mvs[ref_frame],
cm->ref_frame_sign_bias );
}
find_mv_refs(xd, mi, prev_mi,
ref_frame, mbmi->ref_mvs[ref_frame],
cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->pre.y_buffer,
@@ -799,15 +818,10 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->second_ref_frame,
cm->ref_frame_sign_bias);
// Update stats on relative distance of chosen vector to the
// possible best reference vectors.
{
MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
find_mv_refs(xd, mi, prev_mi,
ref_frame, mbmi->ref_mvs[ref_frame],
cm->ref_frame_sign_bias );
}
find_mv_refs(xd, mi, prev_mi,
mbmi->second_ref_frame,
mbmi->ref_mvs[mbmi->second_ref_frame],
cm->ref_frame_sign_bias);
vp8_find_best_ref_mvs(xd,
xd->second_pre.y_buffer,
@@ -977,11 +991,26 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
case NEWMV:
#if CONFIG_NEW_MVREF
{
int best_index;
MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
// Decode the index of the choice.
best_index =
vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
best_mv.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
}
#endif
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
xd->allow_high_precision_mv);
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
@@ -995,7 +1024,20 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_right_edge,
mb_to_top_edge,
mb_to_bottom_edge);
if (mbmi->second_ref_frame) {
#if CONFIG_NEW_MVREF
{
int best_index;
MV_REFERENCE_FRAME ref_frame = mbmi->second_ref_frame;
// Decode the index of the choice.
best_index =
vp8_read_mv_ref_id(bc, xd->mb_mv_ref_id_probs[ref_frame]);
best_mv_second.as_int = mbmi->ref_mvs[ref_frame][best_index].as_int;
}
#endif
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
......
@@ -28,6 +28,7 @@
#include "vp8/common/pred_common.h"
#include "vp8/common/entropy.h"
#include "vp8/encoder/encodemv.h"
#include "vp8/common/entropymv.h"
#if CONFIG_NEWBESTREFMV
#include "vp8/common/mvref_common.h"
@@ -116,69 +117,6 @@ static int prob_diff_update_cost(vp8_prob newp, vp8_prob oldp) {
return update_bits[delp] * 256;
}
#if CONFIG_NEW_MVREF
// Estimate the cost of each coding the vector using each reference candidate
unsigned int pick_best_mv_ref( MACROBLOCK *x,
int_mv target_mv,
int_mv * mv_ref_list,
int_mv * best_ref ) {
int i;
int best_index = 0;
int cost, cost2;
int index_cost[MAX_MV_REFS];
MACROBLOCKD *xd = &x->e_mbd;
/*unsigned int distance, distance2;
distance = mv_distance(&target_mv, &mv_ref_list[0]);
for (i = 1; i < MAX_MV_REFS; ++i ) {
distance2 =
mv_distance(&target_mv, &mv_ref_list[i]);
if (distance2 < distance) {
distance = distance2;
best_index = i;
}
}*/
// For now estimate the cost of selecting a given ref index
// as index * 1 bits (but here 1 bit is scaled to 256)
for (i = 0; i < MAX_MV_REFS; ++i ) {
index_cost[i] = i << 8;
}
index_cost[0] = vp8_cost_zero(205);
index_cost[1] = vp8_cost_zero(40);
index_cost[2] = vp8_cost_zero(8);
index_cost[3] = vp8_cost_zero(2);
cost = index_cost[0] +
vp8_mv_bit_cost(&target_mv,
&mv_ref_list[0],
XMVCOST, 96,
xd->allow_high_precision_mv);
//for (i = 1; i < MAX_MV_REFS; ++i ) {
for (i = 1; i < 4; ++i ) {
cost2 = index_cost[i] +
vp8_mv_bit_cost(&target_mv,
&mv_ref_list[i],
XMVCOST, 96,
xd->allow_high_precision_mv);
if (cost2 < cost) {
cost = cost2;
best_index = i;
}
}
(*best_ref).as_int = mv_ref_list[best_index].as_int;
return best_index;
}
#endif
static void update_mode(
vp8_writer *const bc,
int n,
@@ -321,6 +259,70 @@ static void update_refpred_stats(VP8_COMP *cpi) {
}
}
static void update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
MV mv;
if (mbmi->mode == SPLITMV) {
int i;
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
if (x->e_mbd.allow_high_precision_mv) {
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 1);
}
} else {
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
if (x->e_mbd.mode_info_context->mbmi.second_ref_frame) {
mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 0);
}
}
}
}
} else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
if (mbmi->second_ref_frame) {
mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
}
} else {
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
if (mbmi->second_ref_frame) {
mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
}
}
}
}
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) {
vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}
@@ -619,6 +621,124 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
#if CONFIG_NEW_MVREF
static int vp8_cost_mv_ref_id(vp8_prob * ref_id_probs, int mv_ref_id) {
int cost;
// Cost of coding the index for the MV reference.
switch (mv_ref_id) {
case 0:
cost = vp8_cost_zero(ref_id_probs[0]);
break;
case 1:
cost = vp8_cost_one(ref_id_probs[0]);
cost += vp8_cost_zero(ref_id_probs[1]);
break;
case 2:
cost = vp8_cost_one(ref_id_probs[0]);
cost += vp8_cost_one(ref_id_probs[1]);
cost += vp8_cost_zero(ref_id_probs[2]);
break;
case 3:
cost = vp8_cost_one(ref_id_probs[0]);
cost += vp8_cost_one(ref_id_probs[1]);
cost += vp8_cost_one(ref_id_probs[2]);
break;
// TRAP.. This should not happen
default:
assert(0);
break;
}
return cost;
}
static void vp8_write_mv_ref_id(vp8_writer *w,
vp8_prob * ref_id_probs,
int mv_ref_id) {
// Encode the index for the MV reference.
switch (mv_ref_id) {
case 0:
vp8_write(w, 0, ref_id_probs[0]);
break;
case 1:
vp8_write(w, 1, ref_id_probs[0]);
vp8_write(w, 0, ref_id_probs[1]);
break;
case 2:
vp8_write(w, 1, ref_id_probs[0]);
vp8_write(w, 1, ref_id_probs[1]);
vp8_write(w, 0, ref_id_probs[2]);
break;
case 3:
vp8_write(w, 1, ref_id_probs[0]);
vp8_write(w, 1, ref_id_probs[1]);
vp8_write(w, 1, ref_id_probs[2]);
break;
// TRAP.. This should not happen
default:
assert(0);
break;
}
}
// Estimate the cost of coding the vector using each of the reference candidates
static unsigned int pick_best_mv_ref(MACROBLOCK *x,
MV_REFERENCE_FRAME ref_frame,
int_mv target_mv,
int_mv * mv_ref_list,
int_mv * best_ref) {
int i;
int best_index = 0;
int cost, cost2;
int zero_seen = (mv_ref_list[0].as_int) ? FALSE : TRUE;
MACROBLOCKD *xd = &x->e_mbd;
int max_mv = MV_MAX;
cost = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
vp8_mv_bit_cost(&target_mv,
&mv_ref_list[0],
XMVCOST, 96,
xd->allow_high_precision_mv);
// Use 4 for now : for (i = 1; i < MAX_MV_REFS; ++i ) {
for (i = 1; i < 4; ++i) {
// If we see a 0,0 reference vector for a second time we have reached
// the end of the list of valid candidate vectors.
if (!mv_ref_list[i].as_int)
if (zero_seen)
break;
else
zero_seen = TRUE;
// Check for cases where the reference choice would give rise to an
// uncodable/out of range residual for row or col.
if ((abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv) ||
(abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)) {
continue;
}
cost2 = vp8_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
vp8_mv_bit_cost(&target_mv,
&mv_ref_list[i],
XMVCOST, 96,
xd->allow_high_precision_mv);
if (cost2 < cost) {
cost = cost2;
best_index = i;
}
}
(*best_ref).as_int = mv_ref_list[best_index].as_int;
return best_index;
}
#endif
// This function writes the current macroblock's segment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *bc,
@@ -931,11 +1051,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
{
int_mv n1, n2;
// Only used for context just now and soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m, &n1, &n2, &best_mv, ct,
rf, cpi->common.ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
best_mv.as_int = mi->ref_mv.as_int;
best_mv.as_int = mi->ref_mvs[rf][0].as_int;
#endif
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
@@ -988,13 +1110,15 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
(mode == NEWMV || mode == SPLITMV)) {
int_mv n1, n2;
vp8_find_near_mvs(xd, m,
prev_m,
// Only used for context just now and soon to be deprecated.
vp8_find_near_mvs(xd, m, prev_m,
&n1, &n2, &best_second_mv, ct,
mi->second_ref_frame,
cpi->common.ref_frame_sign_bias);
#if CONFIG_NEWBESTREFMV
best_second_mv.as_int = mi->second_ref_mv.as_int;
best_second_mv.as_int =
mi->ref_mvs[mi->second_ref_frame][0].as_int;
#endif
}
@@ -1012,38 +1136,43 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
active_section = 5;
#endif
#if 0 //CONFIG_NEW_MVREF
#if CONFIG_NEW_MVREF
{
unsigned int best_index;
/*find_mv_refs(xd, m, prev_m,
m->mbmi.ref_frame,
mi->ref_mvs[rf],
cpi->common.ref_frame_sign_bias );*/
best_index = pick_best_mv_ref(x, mi->mv[0],
// Choose the best mv reference
best_index = pick_best_mv_ref(x, rf, mi->mv[0],
mi->ref_mvs[rf], &best_mv);
cpi->best_ref_index_counts[best_index]++;
// Encode the index of the choice.
vp8_write_mv_ref_id(bc,
xd->mb_mv_ref_id_probs[rf], best_index);
cpi->best_ref_index_counts[rf][best_index]++;
}
#endif
write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
if (mi->second_ref_frame) {
#if 0 //CONFIG_NEW_MVREF
#if CONFIG_NEW_MVREF
unsigned int best_index;
/*find_mv_refs(xd, m, prev_m,
m->mbmi.second_ref_frame,
mi->ref_mvs[mi->second_ref_frame],
cpi->common.ref_frame_sign_bias );*/
MV_REFERENCE_FRAME sec_ref_frame = mi->second_ref_frame;
best_index =
pick_best_mv_ref(x, mi->mv[1],
mi->ref_mvs[mi->second_ref_frame],
pick_best_mv_ref(x, sec_ref_frame, mi->mv[1],
mi->ref_mvs[sec_ref_frame],
&best_second_mv);
cpi->best_ref_index_counts[best_index]++;
// Encode the index of the choice.
vp8_write_mv_ref_id(bc,
xd->mb_mv_ref_id_probs[sec_ref_frame],
best_index);
cpi->best_ref_index_counts[sec_ref_frame][best_index]++;
#endif
write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
@@ -1108,6 +1237,12 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
break;
}
}
// Update the mvcounts used to tune mv probs but only if this is
// the real pack run.
if ( !cpi->dummy_packing ) {
update_mvcount(cpi, x, &best_mv, &best_second_mv);
}
}
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
@@ -2160,6 +2295,11 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
update_mbintra_mode_probs(cpi, &header_bc);
#if CONFIG_NEW_MVREF
// Temporary default probabilities for encoding the MV ref id signal
vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
#endif
vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &