Commit 9c7a0090 authored by Scott LaVarnway

Removed unnecessary MB_MODE_INFO copies

These copies occurred for each macroblock in the encoder and decoder.
The temp MB_MODE_INFO mbmi was removed from MACROBLOCKD.  As a result,
a large number of compile errors had to be fixed.

Change-Id: I4cf0ffae3ce244f6db04a4c217d52dd256382cf3
parent f5615b61
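
For reference, the pattern applied throughout the hunks below is sketched here in plain C. The type and field names (MACROBLOCKD, MODE_INFO, MB_MODE_INFO, mode_info_context) are the ones visible in the diff, but the struct definitions are reduced stand-ins, the mode enum is trimmed to values that appear in this change, and the two helper functions are hypothetical illustrations, not code from the tree:

#include <string.h>

/* Reduced stand-ins for the real vp8 types; the real definitions carry
   many more fields. */
typedef enum { DC_PRED, B_PRED, ZEROMV, SPLITMV } MB_PREDICTION_MODE;

typedef struct
{
    MB_PREDICTION_MODE mode;
    int ref_frame;
    int segment_id;
} MB_MODE_INFO;

typedef struct
{
    MB_MODE_INFO mbmi;            /* per-MB entry in the frame-wide array */
} MODE_INFO;

typedef struct
{
    MODE_INFO *mode_info_context; /* advanced once per macroblock */
    MB_MODE_INFO mbmi;            /* the temp copy this commit deletes */
} MACROBLOCKD;

/* Before: each macroblock refreshed the temp copy (vpx_memcpy in the real
   code) and every reader used xd->mbmi. */
void read_mode_before(MACROBLOCKD *xd)
{
    memcpy(&xd->mbmi, &xd->mode_info_context->mbmi, sizeof(xd->mbmi));

    if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV)
    {
        /* e.g. take the second-order transform path */
    }
}

/* After: no per-MB copy; readers dereference the context pointer directly. */
void read_mode_after(const MACROBLOCKD *xd)
{
    const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;

    if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV)
    {
        /* e.g. take the second-order transform path */
    }
}

The encoder side is the mirror image: mode decisions that used to be made in the temp xd->mbmi and copied back into xd->mode_info_context->mbmi at the end of encode_mb_row are now written to the MODE_INFO array directly, so that write-back memcpy disappears as well.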
@@ -215,7 +215,7 @@ typedef struct
{
DECLARE_ALIGNED(16, short, diff[400]); // from idct diff
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
DECLARE_ALIGNED(16, short, reference[384]);
//not used DECLARE_ALIGNED(16, short, reference[384]);
DECLARE_ALIGNED(16, short, qcoeff[400]);
DECLARE_ALIGNED(16, short, dqcoeff[400]);
@@ -232,8 +232,6 @@ typedef struct
FRAME_TYPE frame_type;
MB_MODE_INFO mbmi;
int up_available;
int left_available;
......
@@ -65,7 +65,8 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x
{
int i;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
// do 2nd order transform on the dc block
......
@@ -210,7 +210,8 @@ void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
{
int i;
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char *uptr, *vptr;
unsigned char *upred_ptr = &x->predictor[256];
@@ -254,16 +255,18 @@ void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x)
}
}
//encoder only
void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char *ptr_base;
unsigned char *ptr;
unsigned char *pred_ptr = x->predictor;
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@@ -282,7 +285,7 @@ void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
{
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
@@ -313,7 +316,9 @@ void vp8_build_inter_predictors_mby(MACROBLOCKD *x)
void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
if (x->mbmi.ref_frame != INTRA_FRAME && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME &&
x->mode_info_context->mbmi.mode != SPLITMV)
{
int offset;
unsigned char *ptr_base;
@@ -323,8 +328,8 @@ void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
unsigned char *upred_ptr = &x->predictor[256];
unsigned char *vpred_ptr = &x->predictor[320];
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@@ -361,7 +366,7 @@ void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
@@ -410,7 +415,7 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
{
int i, j;
if (x->mbmi.mode == SPLITMV)
if (x->mode_info_context->mbmi.mode == SPLITMV)
{
for (i = 0; i < 2; i++)
{
@@ -455,8 +460,8 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
}
else
{
int mvrow = x->mbmi.mv.as_mv.row;
int mvcol = x->mbmi.mv.as_mv.col;
int mvrow = x->mode_info_context->mbmi.mv.as_mv.row;
int mvcol = x->mode_info_context->mbmi.mv.as_mv.col;
if (mvrow < 0)
mvrow -= 1;
@@ -535,7 +540,7 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
unsigned char *pred_ptr = x->predictor;
unsigned char *dst_ptr = x->dst.y_buffer;
if (x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != SPLITMV)
{
int offset;
unsigned char *ptr_base;
@@ -547,8 +552,8 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
unsigned char *udst_ptr = x->dst.u_buffer;
unsigned char *vdst_ptr = x->dst.v_buffer;
int mv_row = x->mbmi.mv.as_mv.row;
int mv_col = x->mbmi.mv.as_mv.col;
int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
int pre_stride = x->dst.y_stride; //x->block[0].pre_stride;
ptr_base = x->pre.y_buffer;
@@ -587,7 +592,7 @@ void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x)
//if sth is wrong, go back to what it is in build_inter_predictors_mb.
int i;
if (x->mbmi.partitioning < 3)
if (x->mode_info_context->mbmi.partitioning < 3)
{
for (i = 0; i < 4; i++)
{
......
@@ -43,7 +43,7 @@ void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
}
// for Y
switch (x->mbmi.mode)
switch (x->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@@ -164,7 +164,7 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
}
// for Y
switch (x->mbmi.mode)
switch (x->mode_info_context->mbmi.mode)
{
case DC_PRED:
{
@@ -290,7 +290,7 @@ void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
}
switch (x->mbmi.uv_mode)
switch (x->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{
@@ -430,7 +430,7 @@ void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
}
switch (x->mbmi.uv_mode)
switch (x->mode_info_context->mbmi.uv_mode)
{
case DC_PRED:
{
......
@@ -113,7 +113,7 @@ static void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
// to dst buffer, we can write the result directly to dst buffer. This eliminates unnecessary copy.
static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd);
@@ -164,7 +164,7 @@ static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
static void clamp_mvs(MACROBLOCKD *xd)
{
if (xd->mbmi.mode == SPLITMV)
if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int i;
@@ -175,7 +175,7 @@ static void clamp_mvs(MACROBLOCKD *xd)
}
else
{
clamp_mv_to_umv_border(&xd->mbmi.mv.as_mv, xd);
clamp_mv_to_umv_border(&xd->mode_info_context->mbmi.mv.as_mv, xd);
clamp_uvmv_to_umv_border(&xd->block[16].bmi.mv.as_mv, xd);
}
@@ -184,10 +184,9 @@ static void clamp_mvs(MACROBLOCKD *xd)
void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
int eobtotal = 0;
MV orig_mvs[24];
int i, do_clamp = xd->mbmi.need_to_clamp_mvs;
int i, do_clamp = xd->mode_info_context->mbmi.need_to_clamp_mvs;
if (xd->mbmi.mb_skip_coeff)
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
@@ -199,20 +198,12 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
/* Perform temporary clamping of the MV to be used for prediction */
if (do_clamp)
{
if (xd->mbmi.mode == SPLITMV)
for (i=0; i<24; i++)
orig_mvs[i] = xd->block[i].bmi.mv.as_mv;
else
{
orig_mvs[0] = xd->mbmi.mv.as_mv;
orig_mvs[1] = xd->block[16].bmi.mv.as_mv;
}
clamp_mvs(xd);
}
xd->mode_info_context->mbmi.dc_diff = 1;
if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV && eobtotal == 0)
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV && eobtotal == 0)
{
xd->mode_info_context->mbmi.dc_diff = 0;
skip_recon_mb(pbi, xd);
@@ -223,11 +214,11 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
mb_init_dequantizer(pbi, xd);
// do prediction
if (xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv(xd);
if (xd->mbmi.mode != B_PRED)
if (xd->mode_info_context->mbmi.mode != B_PRED)
{
vp8_build_intra_predictors_mby_ptr(xd);
} else {
@@ -240,7 +231,7 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
}
// dequantization and idct
if (xd->mbmi.mode != B_PRED && xd->mbmi.mode != SPLITMV)
if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
{
BLOCKD *b = &xd->block[24];
DEQUANT_INVOKE(&pbi->dequant, block)(b);
@@ -283,7 +274,7 @@ void vp8_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
}
}
}
else if ((xd->frame_type == KEY_FRAME || xd->mbmi.ref_frame == INTRA_FRAME) && xd->mbmi.mode == B_PRED)
else if ((xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) && xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@@ -394,12 +385,8 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@@ -420,9 +407,9 @@ void vp8_decode_mb_row(VP8D_COMP *pbi,
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
@@ -608,7 +595,7 @@ static void init_frame(VP8D_COMP *pbi)
xd->left_context = pc->left_context;
xd->mode_info_context = pc->mi;
xd->frame_type = pc->frame_type;
xd->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_stride = pc->mode_info_stride;
}
......
@@ -95,7 +95,7 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *x)
*(l+1) = 0;
/* Clear entropy contexts for Y2 blocks */
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
{
a = A[Y2CONTEXT];
l = L[Y2CONTEXT];
@@ -240,7 +240,7 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x)
i = 0;
stop = 16;
if (x->mbmi.mode != B_PRED && x->mbmi.mode != SPLITMV)
if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV)
{
i = 24;
stop = 24;
......
@@ -69,9 +69,6 @@ void vp8_setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC
mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta;
vpx_memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data));
mbd->mbmi.mode = DC_PRED;
mbd->mbmi.uv_mode = DC_PRED;
mbd->current_bc = &pbi->bc2;
for (j = 0; j < 25; j++)
@@ -222,12 +219,7 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
}
}
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@@ -248,9 +240,9 @@ THREAD_FUNCTION vp8_thread_decoding_proc(void *p_data)
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
@@ -639,12 +631,7 @@ void vp8_mtdecode_mb_rows(VP8D_COMP *pbi,
}
// Take a copy of the mode and Mv information for this macroblock into the xd->mbmi
// the partition_bmi array is unused in the decoder, so don't copy it.
vpx_memcpy(&xd->mbmi, &xd->mode_info_context->mbmi,
sizeof(MB_MODE_INFO) - sizeof(xd->mbmi.partition_bmi));
if (xd->mbmi.mode == SPLITMV || xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == SPLITMV || xd->mode_info_context->mbmi.mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
@@ -665,9 +652,9 @@ void vp8_mtdecode_mb_rows(VP8D_COMP *pbi,
xd->left_available = (mb_col != 0);
// Select the appropriate reference frame for this MB
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
......
@@ -254,7 +254,6 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
int i;
int QIndex;
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mbmi;
int zbin_extra;
// Select the baseline MB Q index.
@@ -262,12 +261,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
// Abs Value
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
// Delta Value
else
{
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
}
}
@@ -388,14 +387,14 @@ void encode_mb_row(VP8_COMP *cpi,
{
// Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
if (cpi->segmentation_map[seg_map_index+mb_col] <= 3)
xd->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col];
else
xd->mbmi.segment_id = 0;
xd->mode_info_context->mbmi.segment_id = 0;
vp8cx_mb_init_quantizer(cpi, x);
}
else
xd->mbmi.segment_id = 0; // Set to Segment 0 by default
xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
x->active_ptr = cpi->active_map + seg_map_index + mb_col;
@@ -426,7 +425,7 @@ void encode_mb_row(VP8_COMP *cpi,
#endif
// Count of last ref frame 0,0 useage
if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
cpi->inter_zz_count ++;
// Special case code for cyclic refresh
@@ -434,14 +433,14 @@ void encode_mb_row(VP8_COMP *cpi,
// during vp8cx_encode_inter_macroblock()) back into the global segmentation map
if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
{
cpi->segmentation_map[seg_map_index+mb_col] = xd->mbmi.segment_id;
cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
// If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
// Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
// else mark it as dirty (1).
if (xd->mbmi.segment_id)
if (xd->mode_info_context->mbmi.segment_id)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1;
else if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame == LAST_FRAME))
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
{
if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1)
cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0;
@@ -456,9 +455,6 @@ void encode_mb_row(VP8_COMP *cpi,
x->gf_active_ptr++; // Increment pointer into gf useage flags structure for next mb
// store macroblock mode info into context array
vpx_memcpy(&xd->mode_info_context->mbmi, &xd->mbmi, sizeof(xd->mbmi));
for (i = 0; i < 16; i++)
vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi));
@@ -471,7 +467,7 @@ void encode_mb_row(VP8_COMP *cpi,
recon_uvoffset += 8;
// Keep track of segment useage
segment_counts[xd->mbmi.segment_id] ++;
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
// skip to next mb
xd->mode_info_context++;
@@ -627,8 +623,8 @@ void vp8_encode_frame(VP8_COMP *cpi)
//x->rdmult = (int)(cpi->RDMULT * pow( (cpi->rate_correction_factor * 2.0), 0.75 ));
#endif
xd->mbmi.mode = DC_PRED;
xd->mbmi.uv_mode = DC_PRED;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = cm->left_context;
@@ -982,8 +978,8 @@ void vp8_build_block_offsets(MACROBLOCK *x)
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mbmi.uv_mode;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
@@ -1021,7 +1017,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
int rateuv_tokenonly = 0;
int i;
x->e_mbd.mbmi.ref_frame = INTRA_FRAME;
x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
#if !(CONFIG_REALTIME_ONLY)
@@ -1037,7 +1033,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv);
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
rate += rateuv;
@@ -1045,7 +1041,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
if (Error4x4 < Error16x16)
{
rate += rate4x4;
x->e_mbd.mbmi.mode = B_PRED;
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
// get back the intra block modes
for (i = 0; i < 16; i++)
@@ -1085,7 +1081,7 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
for (mode = DC_PRED; mode <= TM_PRED; mode ++)
{
x->e_mbd.mbmi.mode = mode;
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_ptr(&x->e_mbd);
distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
@@ -1105,17 +1101,17 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
else
Error4x4 = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
if (Error4x4 < Error16x16)
{
x->e_mbd.mbmi.mode = B_PRED;
x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error4x4;
}
else
{
x->e_mbd.mbmi.mode = best_mode;
x->e_mbd.mode_info_context->mbmi.mode = best_mode;
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
cpi->prediction_error += Error16x16;
}
@@ -1149,7 +1145,7 @@ int vp8cx_encode_inter_macroblock
x->skip = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[xd->mbmi.segment_id];
x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
@@ -1180,17 +1176,17 @@ int vp8cx_encode_inter_macroblock
if (cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
if ((xd->mbmi.segment_id == 1) &&
((xd->mbmi.ref_frame != LAST_FRAME) || (xd->mbmi.mode != ZEROMV)))
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
{
xd->mbmi.segment_id = 0;
xd->mode_info_context->mbmi.segment_id = 0;
}
}
// Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to supress noise
if (cpi->zbin_mode_boost_enabled)
{
if ((xd->mbmi.mode == ZEROMV) && (xd->mbmi.ref_frame != LAST_FRAME))
if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME))
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = 0;
@@ -1199,15 +1195,15 @@ int vp8cx_encode_inter_macroblock
vp8cx_mb_init_quantizer(cpi, x);
}
cpi->count_mb_ref_frame_usage[xd->mbmi.ref_frame] ++;
cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
if (xd->mbmi.ref_frame == INTRA_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
x->e_mbd.mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (xd->mbmi.mode == B_PRED)
if (xd->mode_info_context->mbmi.mode == B_PRED)
{
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
@@ -1226,13 +1222,13 @@ int vp8cx_encode_inter_macroblock
int ref_fb_idx;
vp8_find_near_mvs(xd, xd->mode_info_context,
&nearest, &nearby, &best_ref_mv, mdcounts, xd->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
&nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
vp8_build_uvmvs(xd, cpi->common.full_pixel);
if (xd->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mbmi.ref_frame == GOLDEN_FRAME)
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
@@ -1241,7 +1237,7 @@ int vp8cx_encode_inter_macroblock
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mbmi.mode == SPLITMV)
if (xd->mode_info_context->mbmi.mode == SPLITMV)
{
int i;
@@ -1254,19 +1250,19 @@ int vp8cx_encode_inter_macroblock
}
}
}