Commit c3bbb291 authored by Yunqing Wang

Improve MV prediction accuracy to achieve performance gain

Add vp8_mv_pred() to better predict the starting MV for NEWMV
mode in vp8_rd_pick_inter_mode(). Set different search ranges
according to the MV prediction accuracy, which improves encoder
performance without hurting quality. Also, as Yaowu suggested,
using the diamond search result as the full search starting
point, and reducing the full search range accordingly, helps
performance.

Change-Id: Ie4a3c8df87e697c1f4f6e2ddb693766bba1b77b6
parent a5397dba
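
Note: the diff below only threads a new center_mv argument through the motion search functions (so candidate MVs are costed against the search center rather than always against ref_mv) and adds storage for the previous frame's MVs; the vp8_mv_pred() routine itself is not part of this excerpt. As a rough, hypothetical sketch of the idea described in the commit message (names, thresholds, and step values here are illustrative assumptions, not the commit's actual implementation), MV prediction plus accuracy-based range selection could look like this:

    /* Hypothetical sketch only -- not the vp8_mv_pred() added by this commit.
     * Idea: predict a starting MV for the NEWMV search (e.g. from the
     * co-located MV saved from the last frame and a spatial neighbour), then
     * choose the diamond-search range from how much the predictors agree.
     * In VP8 a larger step_param means a smaller initial step and fewer
     * iterations, i.e. a tighter search around the prediction. */
    #include <stdlib.h>

    typedef struct { short row, col; } MV;

    static int mv_distance(const MV *a, const MV *b)
    {
        return abs(a->row - b->row) + abs(a->col - b->col);
    }

    /* Returns the predicted starting MV and sets *step_param:
     * predictors that agree -> accurate prediction -> tighter search. */
    static MV predict_start_mv(const MV *last_frame_mv,  /* co-located, last frame */
                               const MV *nearby_mv,      /* spatial neighbour      */
                               int *step_param)
    {
        int spread = mv_distance(last_frame_mv, nearby_mv);

        /* Illustrative thresholds and step values only. */
        if (spread <= 8)
            *step_param = 7;   /* predictors agree: small search range   */
        else if (spread <= 32)
            *step_param = 5;   /* moderate agreement: medium range       */
        else
            *step_param = 3;   /* predictors disagree: wide search range */

        return *last_frame_mv; /* use the temporal predictor as the start */
    }
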
@@ -472,7 +472,7 @@ void vp8_first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *
     xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
     // Initial step/diamond search centred on best mv
-    tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost);
+    tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost, ref_mv);
     if ( tmp_err < INT_MAX-new_mv_mode_penalty )
         tmp_err += new_mv_mode_penalty;

@@ -495,7 +495,7 @@ void vp8_first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *
            num00--;
        else
        {
-           tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param + n, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost);
+           tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param + n, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost, ref_mv);
            if ( tmp_err < INT_MAX-new_mv_mode_penalty )
                tmp_err += new_mv_mode_penalty;

@@ -913,7 +913,8 @@ int vp8_diamond_search_sad
     int *num00,
     vp8_variance_fn_ptr_t *fn_ptr,
     int *mvsadcost[2],
-    int *mvcost[2]
+    int *mvcost[2],
+    MV *center_mv
 )
 {
     int i, j, step;

@@ -949,7 +950,7 @@ int vp8_diamond_search_sad
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
-       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
    // search_param determines the length of the initial step and hence the number of iterations

@@ -982,7 +983,7 @@ int vp8_diamond_search_sad
            {
                this_mv.row = this_row_offset << 3;
                this_mv.col = this_col_offset << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1013,7 +1014,7 @@ int vp8_diamond_search_sad
        return INT_MAX;
    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-          + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+          + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
 }

 int vp8_diamond_search_sadx4

@@ -1028,7 +1029,8 @@ int vp8_diamond_search_sadx4
     int *num00,
     vp8_variance_fn_ptr_t *fn_ptr,
     int *mvsadcost[2],
-    int *mvcost[2]
+    int *mvcost[2],
+    MV *center_mv
 )
 {
     int i, j, step;

@@ -1064,7 +1066,7 @@ int vp8_diamond_search_sadx4
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
-       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
    // search_param determines the length of the initial step and hence the number of iterations

@@ -1108,7 +1110,7 @@ int vp8_diamond_search_sadx4
                {
                    this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
                    this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
-                   sad_array[t] += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   sad_array[t] += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                    if (sad_array[t] < bestsad)
                    {

@@ -1137,7 +1139,7 @@ int vp8_diamond_search_sadx4
            {
                this_mv.row = this_row_offset << 3;
                this_mv.col = this_col_offset << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1168,12 +1170,12 @@ int vp8_diamond_search_sadx4
        return INT_MAX;
    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-          + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+          + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
 }
 #if !(CONFIG_REALTIME_ONLY)
-int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
 {
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;

@@ -1211,7 +1213,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
        // Baseline value at the centre
        //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
    // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border

@@ -1239,7 +1241,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
            this_mv.col = c << 3;
            //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
            //thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
-           thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
+           thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
            if (thissad < bestsad)
            {

@@ -1258,12 +1260,12 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-              + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+              + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
 }
-int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
 {
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;

@@ -1301,7 +1303,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
    // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border

@@ -1336,7 +1338,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1359,7 +1361,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1381,14 +1383,14 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-              + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+              + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
 }

 #endif
-int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
 {
     unsigned char *what = (*(b->base_src) + b->src);
     int what_stride = b->src_stride;

@@ -1427,7 +1429,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
    // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border

@@ -1462,7 +1464,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1491,7 +1493,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1514,7 +1516,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);
                if (thissad < bestsad)
                {

@@ -1535,7 +1537,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-              + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+              + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
 }

@@ -67,7 +67,8 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
     int distance, \
     vp8_variance_fn_ptr_t *fn_ptr, \
     int *mvcost[2], \
-    int *mvsadcost[2] \
+    int *mvsadcost[2], \
+    MV *center_mv \
 )

 #define prototype_diamond_search_sad(sym)\

@@ -83,7 +84,8 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
     int *num00, \
     vp8_variance_fn_ptr_t *fn_ptr, \
     int *mvsadcost[2], \
-    int *mvcost[2] \
+    int *mvcost[2], \
+    MV *center_mv \
 )

 #if ARCH_X86 || ARCH_X86_64

@@ -283,6 +283,21 @@ static void setup_features(VP8_COMP *cpi)
 void vp8_dealloc_compressor_data(VP8_COMP *cpi)
 {
+    // Delete last frame MV storage buffers
+    if (cpi->lfmv != 0)
+        vpx_free(cpi->lfmv);
+    cpi->lfmv = 0;
+
+    if (cpi->lf_ref_frame_sign_bias != 0)
+        vpx_free(cpi->lf_ref_frame_sign_bias);
+    cpi->lf_ref_frame_sign_bias = 0;
+
+    if (cpi->lf_ref_frame != 0)
+        vpx_free(cpi->lf_ref_frame);
+    cpi->lf_ref_frame = 0;
+
     // Delete sementation map
     if (cpi->segmentation_map != 0)

@@ -2145,7 +2160,10 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
     cpi->alt_is_last = 0 ;
     cpi->gold_is_alt = 0 ;
+    // allocate memory for storing last frame's MVs for MV prediction.
+    CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cpi->common.mb_rows+1) * (cpi->common.mb_cols+1), sizeof(int_mv)));
+    CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, vpx_calloc((cpi->common.mb_rows+1) * (cpi->common.mb_cols+1), sizeof(int)));
+    CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows+1) * (cpi->common.mb_cols+1), sizeof(int)));
     // Create the encoder segmentation map and set all entries to 0
     CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
@@ -4165,6 +4183,60 @@ static void encode_frame_to_data_rate
     }
 #endif
+    ////////////////////////////////
+    ////////////////////////////////
+    // This frame's MVs are saved and will be used in next frame's MV prediction.
+    if(cm->show_frame)   //do not save for altref frame
+    {
+        int mb_row;
+        int mb_col;
+        MODE_INFO *tmp = cm->mip; //point to beginning of allocated MODE_INFO arrays.
+        //static int last_video_frame = 0;
+
+        /*
+        if (cm->current_video_frame == 0)  //first frame: set to 0
+        {
+            for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
+            {
+                for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
+                {
+                    cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride)].as_int = 0;
+                    cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride)] = 0;
+                    cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride)] = 0;
+                }
+            }
+        }else
+        */
+
+        if(cm->frame_type != KEY_FRAME)
+        {
+            for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
+            {
+                for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
+                {
+                    if(tmp->mbmi.ref_frame != INTRA_FRAME)
+                        cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride)].as_int = tmp->mbmi.mv.as_int;
+
+                    cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
+                    cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride)] = tmp->mbmi.ref_frame;
+                    //printf("[%d, %d] ", cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride-1)].as_mv.row, cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride-1)].as_mv.col);
+                    tmp++;
+                }
+            }
+            //last_video_frame = cm->current_video_frame;
+        }
+    }
+    //printf("after: %d %d \n", cm->current_video_frame, cm->show_frame );
+
     // Update the GF useage maps.
     // This is done after completing the compression of a frame when all modes etc. are finalized but before loop filter
     vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);

@@ -239,6 +239,12 @@ enum
     BLOCK_MAX_SEGMENTS
 };
+typedef union
+{
+    unsigned int as_int;
+    MV           as_mv;
+} int_mv;        /* facilitates rapid equality tests */
+
 typedef struct
 {

@@ -661,6 +667,10 @@ typedef struct
     unsigned char *gf_active_flags;   // Record of which MBs still refer to last golden frame either directly or through 0,0
     int gf_active_count;
+    //Store last frame's MV info for next frame MV prediction
+    int_mv *lfmv;
+    int *lf_ref_frame_sign_bias;
+    int *lf_ref_frame;
 } VP8_COMP;

@@ -685,7 +685,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
 #if 0
        // Initial step Search
-       bestsme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, cpi->mb.mvcost);
+       bestsme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, cpi->mb.mvcost, &best_ref_mv1);
        mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
        mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;

@@ -698,7 +698,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
                num00--;
            else
            {
-               thissme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, x->mvcost);
+               thissme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, x->mvcost, &best_ref_mv1);
                if (thissme < bestsme)
                {

@@ -724,7 +724,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
        }
        else
        {
-           bestsme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost);  //sadpb < 9
+           bestsme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv1);  //sadpb < 9
            mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
            mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;

@@ -743,7 +743,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
                num00--;
            else
            {
-               thissme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost);  //sadpb = 9
+               thissme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv1);  //sadpb = 9
                if (thissme < bestsme)
                {

@@ -1236,7 +1236,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
                bestsme = vp8_hex_search(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
            else
            {
-               bestsme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
+               bestsme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost, best_ref_mv);
                n = num00;
                num00 = 0;

@@ -1249,7 +1249,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
                        num00--;
                    else
                    {
-                       thissme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &temp_mv, step_param + n, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
+                       thissme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &temp_mv, step_param + n, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost, best_ref_mv);
                        if (thissme < bestsme)
                        {

@@ -1264,7 +1264,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
                // Should we do a full search (best quality only)
                if ((compressor_speed == 0) && (bestsme >> sseshift) > 4000)
                {
-                   thissme = cpi->full_search_sad(x, c, e, best_ref_mv, sadpb / 4, 16, v_fn_ptr, x->mvcost, x->mvsadcost);
+                   thissme = cpi->full_search_sad(x, c, e, best_ref_mv, sadpb / 4, 16, v_fn_ptr, x->mvcost, x->mvsadcost, best_ref_mv);