Commit 4a1b6d81 authored by Jingning Han's avatar Jingning Han

Account for the context-based probability model in motion vector cost estimation

This commit accounts for the context-based probability model when
estimating motion vector cost in rate-distortion optimization.

Change-Id: I18961b25ef356e4751407a178702e9a7a1213100
parent a8c1d85e
......@@ -98,12 +98,21 @@ struct macroblock {
unsigned int pred_sse[MAX_REF_FRAMES];
int pred_mv_sad[MAX_REF_FRAMES];
#if CONFIG_REF_MV
int *nmvjointcost;
int nmv_vec_cost[NMV_CONTEXTS][MV_JOINTS];
int *nmvcost[NMV_CONTEXTS][2];
int *nmvcost_hp[NMV_CONTEXTS][2];
int **mv_cost_stack[NMV_CONTEXTS];
int *nmvjointsadcost;
#else
int nmvjointcost[MV_JOINTS];
int *nmvcost[2];
int *nmvcost_hp[2];
int nmvjointsadcost[MV_JOINTS];
#endif
int **mvcost;
int nmvjointsadcost[MV_JOINTS];
int *nmvsadcost[2];
int *nmvsadcost_hp[2];
int **mvsadcost;
......
......@@ -219,6 +219,22 @@ int av1_get_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->td.mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
#if CONFIG_REF_MV
if (cpi->common.allow_high_precision_mv) {
int i;
for (i = 0; i < NMV_CONTEXTS; ++i) {
mb->mv_cost_stack[i] = mb->nmvcost_hp[i];
mb->mvsadcost = mb->nmvsadcost_hp;
}
} else {
int i;
for (i = 0; i < NMV_CONTEXTS; ++i) {
mb->mv_cost_stack[i] = mb->nmvcost[i];
mb->mvsadcost = mb->nmvsadcost;
}
}
#else
if (cpi->common.allow_high_precision_mv) {
mb->mvcost = mb->nmvcost_hp;
mb->mvsadcost = mb->nmvsadcost_hp;
......@@ -226,6 +242,7 @@ void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
mb->mvcost = mb->nmvcost;
mb->mvsadcost = mb->nmvsadcost;
}
#endif
}
static void setup_frame(AV1_COMP *cpi) {
......@@ -331,6 +348,9 @@ void av1_initialize_enc(void) {
static void dealloc_compressor_data(AV1_COMP *cpi) {
AV1_COMMON *const cm = &cpi->common;
#if CONFIG_REF_MV
int i;
#endif
aom_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
......@@ -344,6 +364,19 @@ static void dealloc_compressor_data(AV1_COMP *cpi) {
aom_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
aom_free(cpi->nmv_costs[i][0]);
aom_free(cpi->nmv_costs[i][1]);
aom_free(cpi->nmv_costs_hp[i][0]);
aom_free(cpi->nmv_costs_hp[i][1]);
cpi->nmv_costs[i][0] = NULL;
cpi->nmv_costs[i][1] = NULL;
cpi->nmv_costs_hp[i][0] = NULL;
cpi->nmv_costs_hp[i][1] = NULL;
}
#endif
aom_free(cpi->nmvcosts[0]);
aom_free(cpi->nmvcosts[1]);
cpi->nmvcosts[0] = NULL;
......@@ -393,12 +426,29 @@ static void dealloc_compressor_data(AV1_COMP *cpi) {
static void save_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
AV1_COMMON *cm = &cpi->common;
#if CONFIG_REF_MV
int i;
#endif
// Stores a snapshot of key state variables which can subsequently be
// restored with a call to av1_restore_coding_context. These functions are
// intended for use in a re-code loop in av1_compress_frame where the
// quantizer value is adjusted between loop iterations.
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
av1_copy(cc->nmv_vec_cost[i], cpi->td.mb.nmv_vec_cost[i]);
memcpy(cc->nmv_costs[i][0], cpi->nmv_costs[i][0],
MV_VALS * sizeof(*cpi->nmv_costs[i][0]));
memcpy(cc->nmv_costs[i][1], cpi->nmv_costs[i][1],
MV_VALS * sizeof(*cpi->nmv_costs[i][1]));
memcpy(cc->nmv_costs_hp[i][0], cpi->nmv_costs_hp[i][0],
MV_VALS * sizeof(*cpi->nmv_costs_hp[i][0]));
memcpy(cc->nmv_costs_hp[i][1], cpi->nmv_costs_hp[i][1],
MV_VALS * sizeof(*cpi->nmv_costs_hp[i][1]));
}
#else
av1_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
#endif
memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
MV_VALS * sizeof(*cpi->nmvcosts[0]));
......@@ -425,10 +475,27 @@ static void save_coding_context(AV1_COMP *cpi) {
static void restore_coding_context(AV1_COMP *cpi) {
CODING_CONTEXT *const cc = &cpi->coding_context;
AV1_COMMON *cm = &cpi->common;
#if CONFIG_REF_MV
int i;
#endif
// Restore key state variables to the snapshot state stored in the
// previous call to av1_save_coding_context.
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
av1_copy(cpi->td.mb.nmv_vec_cost[i], cc->nmv_vec_cost[i]);
memcpy(cpi->nmv_costs[i][0], cc->nmv_costs[i][0],
MV_VALS * sizeof(*cc->nmv_costs[i][0]));
memcpy(cpi->nmv_costs[i][1], cc->nmv_costs[i][1],
MV_VALS * sizeof(*cc->nmv_costs[i][1]));
memcpy(cpi->nmv_costs_hp[i][0], cc->nmv_costs_hp[i][0],
MV_VALS * sizeof(*cc->nmv_costs_hp[i][0]));
memcpy(cpi->nmv_costs_hp[i][1], cc->nmv_costs_hp[i][1],
MV_VALS * sizeof(*cc->nmv_costs_hp[i][1]));
}
#else
av1_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
#endif
memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
......@@ -1320,12 +1387,14 @@ void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
#endif
#define log2f(x) (log(x) / (float)M_LOG2_E)
#if !CONFIG_REF_MV
/* Fill the 4-entry MV joint SAD cost table with fixed costs: the first
 * joint class is costed at 600, the remaining three at 300 each. */
static void cal_nmvjointsadcost(int *mvjointsadcost) {
  static const int kJointSadCosts[4] = { 600, 300, 300, 300 };
  size_t j;
  for (j = 0; j < sizeof(kJointSadCosts) / sizeof(kJointSadCosts[0]); ++j)
    mvjointsadcost[j] = kJointSadCosts[j];
}
#endif
static void cal_nmvsadcosts(int *mvsadcost[2]) {
int i = 1;
......@@ -1396,6 +1465,18 @@ AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
cpi->tile_data = NULL;
realloc_segmentation_maps(cpi);
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][0],
aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][0])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs[i][1],
aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs[i][1])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][0],
aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][0])));
CHECK_MEM_ERROR(cm, cpi->nmv_costs_hp[i][1],
aom_calloc(MV_VALS, sizeof(*cpi->nmv_costs_hp[i][1])));
}
#endif
CHECK_MEM_ERROR(cm, cpi->nmvcosts[0],
aom_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0])));
......@@ -1483,15 +1564,25 @@ AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
cpi->first_time_stamp_ever = INT64_MAX;
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i) {
cpi->td.mb.nmvcost[i][0] = &cpi->nmv_costs[i][0][MV_MAX];
cpi->td.mb.nmvcost[i][1] = &cpi->nmv_costs[i][1][MV_MAX];
cpi->td.mb.nmvcost_hp[i][0] = &cpi->nmv_costs_hp[i][0][MV_MAX];
cpi->td.mb.nmvcost_hp[i][1] = &cpi->nmv_costs_hp[i][1][MV_MAX];
}
#else
cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost);
cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX];
cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX];
cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
#endif
cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX];
cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX];
cal_nmvsadcosts(cpi->td.mb.nmvsadcost);
cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX];
cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX];
cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX];
cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp);
......
......@@ -51,6 +51,12 @@ typedef struct {
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
#if CONFIG_REF_MV
int nmv_vec_cost[NMV_CONTEXTS][MV_JOINTS];
int nmv_costs[NMV_CONTEXTS][2][MV_VALS];
int nmv_costs_hp[NMV_CONTEXTS][2][MV_VALS];
#endif
#if !CONFIG_MISC_FIXES
aom_prob segment_pred_probs[PREDICTION_PROBS];
#endif
......@@ -332,6 +338,11 @@ typedef struct AV1_COMP {
CODING_CONTEXT coding_context;
#if CONFIG_REF_MV
int *nmv_costs[NMV_CONTEXTS][2];
int *nmv_costs_hp[NMV_CONTEXTS][2];
#endif
int *nmvcosts[2];
int *nmvcosts_hp[2];
int *nmvsadcosts[2];
......
......@@ -92,10 +92,19 @@ static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
/* Returns the rate cost (scaled by sad_per_bit) of coding the difference
 * between 'mv' and its reference 'ref', for use in full-pel SAD-based
 * motion search. */
static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
                          int sad_per_bit) {
#if CONFIG_REF_MV
  // The context-based cost tables are indexed in 1/8-pel units, so the
  // full-pel difference is scaled up by 8. Multiply instead of '<< 3':
  // left-shifting a negative value is undefined behavior in C, while the
  // multiplication is well defined and yields the same result.
  const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
  return ROUND_POWER_OF_TWO(
      (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) *
      sad_per_bit,
      AV1_PROB_COST_SHIFT);
#else
  const MV diff = { mv->row - ref->row, mv->col - ref->col };
  return ROUND_POWER_OF_TWO(
      (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit,
      AV1_PROB_COST_SHIFT);
#endif
}
void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
......
......@@ -258,6 +258,18 @@ static void set_block_thresholds(const AV1_COMMON *cm, RD_OPT *rd) {
}
}
#if CONFIG_REF_MV
/* Selects the MV cost tables on 'x' that match the NMV context derived
 * from the reference MV stack of 'ref_frame'. The SAD-cost pointers are
 * aliased to the regular cost pointers for the same context. */
void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame) {
  MB_MODE_INFO_EXT *ext = x->mbmi_ext;
  const int ctx =
      av1_nmv_ctx(ext->ref_mv_count[ref_frame], ext->ref_mv_stack[ref_frame]);
  x->nmvjointcost = x->nmv_vec_cost[ctx];
  x->mvcost = x->mv_cost_stack[ctx];
  x->nmvjointsadcost = x->nmvjointcost;
  x->mvsadcost = x->mvcost;
}
#endif
void av1_initialize_rd_consts(AV1_COMP *cpi) {
AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
......@@ -291,12 +303,18 @@ void av1_initialize_rd_consts(AV1_COMP *cpi) {
if (!frame_is_intra_only(cm)) {
#if CONFIG_REF_MV
int nmv_ctx = 0;
av1_build_nmv_cost_table(x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp
: x->nmvcost,
&cm->fc->nmvc[nmv_ctx],
cm->allow_high_precision_mv);
int nmv_ctx;
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
av1_build_nmv_cost_table(x->nmv_vec_cost[nmv_ctx],
cm->allow_high_precision_mv ?
x->nmvcost_hp[nmv_ctx] : x->nmvcost[nmv_ctx],
&cm->fc->nmvc[nmv_ctx],
cm->allow_high_precision_mv);
}
x->mvcost = x->mv_cost_stack[0];
x->nmvjointcost = x->nmv_vec_cost[0];
x->mvsadcost = x->mvcost;
x->nmvjointsadcost = x->nmvjointcost;
#else
av1_build_nmv_cost_table(x->nmvjointcost,
cm->allow_high_precision_mv ? x->nmvcost_hp
......
......@@ -154,6 +154,10 @@ YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const struct AV1_COMP *cpi,
void av1_init_me_luts(void);
#if CONFIG_REF_MV
void av1_set_mvcost(MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame);
#endif
void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
const struct macroblockd_plane *pd,
ENTROPY_CONTEXT t_above[16],
......
......@@ -1672,6 +1672,9 @@ static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
tmp_mv.col >>= 3;
tmp_mv.row >>= 3;
#if CONFIG_REF_MV
av1_set_mvcost(x, refs[id]);
#endif
// Small-range full-pixel motion search.
bestsme = av1_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
&cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
......@@ -1863,6 +1866,9 @@ static int64_t rd_pick_best_sub8x8_mode(
av1_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
#if CONFIG_REF_MV
av1_set_mvcost(x, mbmi->ref_frame[0]);
#endif
bestsme = av1_full_pixel_search(
cpi, x, bsize, &mvp_full, step_param, sadpb,
cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
......@@ -2221,6 +2227,10 @@ static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
av1_set_mv_search_range(x, &ref_mv);
#if CONFIG_REF_MV
av1_set_mvcost(x, ref);
#endif
// Work out the size of the first step in the mv step search.
// 0 here is maximum length first step. 1 is AOMMAX >> 1 etc.
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
......
......@@ -195,10 +195,17 @@ static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
step_param = mv_sf->reduce_first_step_size;
step_param = AOMMIN(step_param, MAX_MVSEARCH_STEPS - 2);
#if CONFIG_REF_MV
x->mvcost = x->mv_cost_stack[0];
x->nmvjointcost = x->nmv_vec_cost[0];
x->mvsadcost = x->mvcost;
x->nmvjointsadcost = x->nmvjointcost;
#endif
// Ignore mv costing by sending NULL pointer instead of cost arrays
av1_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
&best_ref_mv1, ref_mv);
cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
&best_ref_mv1, ref_mv);
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment