Commit 57f180b3 authored by Scott LaVarnway's avatar Scott LaVarnway

Removed bmi from blockd

This originally was "Removed update_blockd_bmi()".  Now,
this patch removes bmi from blockd and uses the bmi found
in mode_info_context, eliminating unnecessary bmi copies
between blockd and mode_info_context.

Change-Id: I287a4972974bb363f49e528daa9b2a2293f4bc76
parent 18f29ff5
......@@ -283,7 +283,7 @@ typedef struct blockd {
int dst;
int dst_stride;
union b_mode_info bmi;
// union b_mode_info bmi;
} BLOCKD;
struct scale_factors {
......@@ -585,23 +585,22 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
return DCT_DCT;
if (xd->mode_info_context->mbmi.mode == I4X4_PRED &&
xd->q_index < ACTIVE_HT) {
const BLOCKD *b = &xd->block[ib];
tx_type = txfm_map(
#if CONFIG_NEWBINTRAMODES
b->bmi.as_mode.first == B_CONTEXT_PRED ? b->bmi.as_mode.context :
xd->mode_info_context->bmi[ib].as_mode.first == B_CONTEXT_PRED ?
xd->mode_info_context->bmi[ib].as_mode.context :
#endif
b->bmi.as_mode.first);
xd->mode_info_context->bmi[ib].as_mode.first);
} else if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
xd->q_index < ACTIVE_HT) {
const BLOCKD *b = &xd->block[ib];
const int ic = (ib & 10);
#if USE_ADST_FOR_I8X8_4X4
#if USE_ADST_PERIPHERY_ONLY
// Use ADST for periphery blocks only
const int inner = ib & 5;
b += ic - ib;
tx_type = txfm_map(pred_mode_conv(
(MB_PREDICTION_MODE)b->bmi.as_mode.first));
(MB_PREDICTION_MODE)xd->mode_info_context->bmi[ic].as_mode.first));
#if USE_ADST_FOR_REMOTE_EDGE
if (inner == 5)
tx_type = DCT_DCT;
......@@ -672,11 +671,10 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
return tx_type;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
xd->q_index < ACTIVE_HT8) {
const BLOCKD *b = &xd->block[ib];
// TODO(rbultje): MB_PREDICTION_MODE / B_PREDICTION_MODE should be merged
// or the relationship otherwise modified to address this type conversion.
tx_type = txfm_map(pred_mode_conv(
(MB_PREDICTION_MODE)b->bmi.as_mode.first));
(MB_PREDICTION_MODE)xd->mode_info_context->bmi[ib].as_mode.first));
} else if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
xd->q_index < ACTIVE_HT8) {
#if USE_ADST_FOR_I16X16_8X8
......@@ -748,16 +746,6 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
void vp9_build_block_doffsets(MACROBLOCKD *xd);
void vp9_setup_block_dptrs(MACROBLOCKD *xd);
static void update_blockd_bmi(MACROBLOCKD *xd) {
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
if (mode == SPLITMV || mode == I8X8_PRED || mode == I4X4_PRED) {
int i;
for (i = 0; i < 16; i++)
xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
}
static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
const TX_SIZE size = mbmi->txfm_size;
......
......@@ -307,18 +307,6 @@ MV clamp_mv_to_umv_border_sb(const MV *src_mv,
return clamped_mv;
}
// TODO(jkoleszar): In principle, nothing has to depend on this, but it's
// currently required. Some users look at the mi->bmi, some look at the
// xd->bmi.
static void duplicate_splitmv_bmi(MACROBLOCKD *xd) {
int i;
for (i = 0; i < 16; i += 2) {
xd->block[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
xd->block[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
}
}
struct build_inter_predictors_args {
MACROBLOCKD *xd;
int x;
......@@ -366,7 +354,7 @@ static void build_inter_predictors(int plane, int block,
if (xd->mode_info_context->mbmi.mode == SPLITMV) {
if (plane == 0) {
mv = &xd->block[block].bmi.as_mv[which_mv].as_mv;
mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
} else {
const int y_block = (block & 2) * 4 + (block & 1) * 2;
split_chroma_mv.row = mi_mv_pred_row_q4(xd, y_block, which_mv);
......@@ -410,11 +398,6 @@ void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
{{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
};
// TODO(jkoleszar): This is a hack no matter where you put it, but does it
// belong here?
if (xd->mode_info_context->mbmi.mode == SPLITMV)
duplicate_splitmv_bmi(xd);
foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
}
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
......
......@@ -253,7 +253,7 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
#if CONFIG_NEWBINTRAMODES
if (b_mode == B_CONTEXT_PRED)
b_mode = x->bmi.as_mode.context;
b_mode = xd->mode_info_context->bmi[block_idx].as_mode.context;
#endif
switch (b_mode) {
......
......@@ -591,7 +591,7 @@ specialize vp9_short_walsh8x4
#
# Motion search
#
prototype int vp9_full_search_sad "struct macroblock *x, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv"
prototype int vp9_full_search_sad "struct macroblock *x, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct vp9_variance_vtable *fn_ptr, DEC_MVCOSTS, union int_mv *center_mv, int n"
specialize vp9_full_search_sad sse3 sse4_1
vp9_full_search_sad_sse3=vp9_full_search_sadx3
vp9_full_search_sad_sse4_1=vp9_full_search_sadx8
......
......@@ -1044,7 +1044,5 @@ void vp9_decode_mb_mode_mv(VP9D_COMP* const pbi,
for (y = 0; y < y_mbs; y++)
for (x = !y; x < x_mbs; x++)
mi[y * mis + x] = *mi;
} else {
update_blockd_bmi(xd);
}
}
......@@ -254,7 +254,7 @@ static void decode_8x8(MACROBLOCKD *xd) {
int stride = xd->plane[0].dst.stride;
if (mode == I8X8_PRED) {
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first;
vp9_intra8x8_predict(xd, b, i8x8mode, dst, stride);
}
tx_type = get_tx_type_8x8(xd, ib);
......@@ -271,7 +271,7 @@ static void decode_8x8(MACROBLOCKD *xd) {
for (i = 0; i < 4; i++) {
int ib = vp9_i8x8_block[i];
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first;
b = &xd->block[16 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
......@@ -324,7 +324,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
const int iblock[4] = {0, 1, 4, 5};
int j;
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
int i8x8mode = xd->mode_info_context->bmi[ib].as_mode.first;
vp9_intra8x8_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
for (j = 0; j < 4; j++) {
......@@ -349,7 +349,7 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
#if CONFIG_NEWBINTRAMODES
xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context =
xd->mode_info_context->bmi[i].as_mode.context =
vp9_find_bpred_context(xd, b);
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
vp9_decode_coefs_4x4(pbi, xd, r, PLANE_TYPE_Y_WITH_DC, i);
......
......@@ -383,16 +383,7 @@ static void update_state(VP9_COMP *cpi,
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
}
if (mb_mode == I4X4_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
assert(xd->block[i].bmi.as_mode.first < B_MODE_COUNT);
}
} else if (mb_mode == I8X8_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
} else if (mb_mode == SPLITMV) {
if (mb_mode == SPLITMV) {
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
......@@ -1828,15 +1819,15 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[0].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[2].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[8].as_mode.first]++;
cpi->i8x8_mode_count[xd->mode_info_context->bmi[10].as_mode.first]++;
}
if (m == I4X4_PRED) {
int b = 0;
do {
int m = xd->block[b].bmi.as_mode.first;
int m = xd->mode_info_context->bmi[b].as_mode.first;
#if CONFIG_NEWBINTRAMODES
if (m == B_CONTEXT_PRED) m -= CONTEXT_PRED_REPLACEMENTS;
#endif
......
......@@ -32,7 +32,7 @@ int vp9_encode_intra(VP9_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
int i;
for (i = 0; i < 16; i++) {
x->e_mbd.block[i].bmi.as_mode.first = B_DC_PRED;
x->e_mbd.mode_info_context->bmi[i].as_mode.first = B_DC_PRED;
encode_intra4x4block(x, i);
}
}
......@@ -58,10 +58,12 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) {
assert(ib < 16);
#if CONFIG_NEWBINTRAMODES
b->bmi.as_mode.context = vp9_find_bpred_context(&x->e_mbd, b);
xd->mode_info_context->bmi[ib].as_mode.context =
vp9_find_bpred_context(&x->e_mbd, b);
#endif
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first,
vp9_intra4x4_predict(&x->e_mbd, b,
xd->mode_info_context->bmi[ib].as_mode.first,
*(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_block(4, 4, src_diff, 16,
src, x->plane[0].src.stride,
......@@ -169,7 +171,7 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
int i;
TX_TYPE tx_type;
vp9_intra8x8_predict(xd, b, b->bmi.as_mode.first,
vp9_intra8x8_predict(xd, b, xd->mode_info_context->bmi[ib].as_mode.first,
*(b->base_dst) + b->dst, b->dst_stride);
// generate residual blocks
vp9_subtract_block(8, 8, src_diff, 16,
......@@ -287,8 +289,7 @@ void vp9_encode_intra8x8mbuv(MACROBLOCK *x) {
int i;
for (i = 0; i < 4; i++) {
BLOCKD *b = &x->e_mbd.block[vp9_i8x8_block[i]];
int mode = b->bmi.as_mode.first;
int mode = x->e_mbd.mode_info_context->bmi[vp9_i8x8_block[i]].as_mode.first;
encode_intra_uv4x4(x, i + 16, mode); // u
encode_intra_uv4x4(x, i + 20, mode); // v
......
......@@ -1574,7 +1574,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
int *mvcost[2],
int_mv *center_mv) {
int_mv *center_mv, int n) {
const MACROBLOCKD* const xd = &x->e_mbd;
uint8_t *what = x->plane[0].src.buf;
int what_stride = x->plane[0].src.stride;
......@@ -1582,7 +1582,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
int_mv *best_mv = &d->bmi.as_mv[0];
int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
int bestsad = INT_MAX;
int r, c;
......@@ -1669,7 +1669,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int vp9_full_search_sadx3(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr, int *mvjcost,
int *mvcost[2], int_mv *center_mv) {
int *mvcost[2], int_mv *center_mv, int n) {
const MACROBLOCKD* const xd = &x->e_mbd;
uint8_t *what = x->plane[0].src.buf;
int what_stride = x->plane[0].src.stride;
......@@ -1677,7 +1677,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
int_mv *best_mv = &d->bmi.as_mv[0];
int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
......@@ -1798,7 +1798,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int sad_per_bit, int distance,
vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2],
int_mv *center_mv) {
int_mv *center_mv, int n) {
const MACROBLOCKD* const xd = &x->e_mbd;
uint8_t *what = x->plane[0].src.buf;
int what_stride = x->plane[0].src.stride;
......@@ -1806,7 +1806,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, BLOCKD *d, int_mv *ref_mv,
int in_what_stride = xd->plane[0].pre[0].stride;
int mv_stride = xd->plane[0].pre[0].stride;
uint8_t *bestaddress;
int_mv *best_mv = &d->bmi.as_mv[0];
int_mv *best_mv = &x->e_mbd.mode_info_context->bmi[n].as_mv[0];
int_mv this_mv;
unsigned int bestsad = INT_MAX;
int r, c;
......
......@@ -62,7 +62,7 @@ typedef int (*vp9_full_search_fn_t)(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int sad_per_bit,
int distance, vp9_variance_fn_ptr_t *fn_ptr,
int *mvjcost, int *mvcost[2],
int_mv *center_mv);
int_mv *center_mv, int n);
typedef int (*vp9_refining_search_fn_t)(MACROBLOCK *x, BLOCKD *d,
int_mv *ref_mv, int sad_per_bit,
......
......@@ -875,7 +875,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
assert(ib < 16);
#if CONFIG_NEWBINTRAMODES
b->bmi.as_mode.context = vp9_find_bpred_context(xd, b);
xd->mode_info_context->bmi[ib].as_mode.context =
vp9_find_bpred_context(xd, b);
#endif
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
for (mode = B_DC_PRED; mode < LEFT4X4; mode++) {
......@@ -892,7 +893,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
}
#endif
b->bmi.as_mode.first = mode;
xd->mode_info_context->bmi[ib].as_mode.first = mode;
#if CONFIG_NEWBINTRAMODES
rate = bmode_costs[
mode == B_CONTEXT_PRED ? mode - CONTEXT_PRED_REPLACEMENTS : mode];
......@@ -905,7 +906,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
src, src_stride,
*(b->base_dst) + b->dst, b->dst_stride);
b->bmi.as_mode.first = mode;
xd->mode_info_context->bmi[ib].as_mode.first = mode;
tx_type = get_tx_type_4x4(xd, ib);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(src_diff, coeff, 16, tx_type);
......@@ -939,7 +940,8 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
vpx_memcpy(best_dqcoeff, BLOCK_OFFSET(xd->plane[0].dqcoeff, ib, 16), 32);
}
}
b->bmi.as_mode.first = (B_PREDICTION_MODE)(*best_mode);
xd->mode_info_context->bmi[ib].as_mode.first =
(B_PREDICTION_MODE)(*best_mode);
// inverse transform
if (best_tx_type != DCT_DCT)
......@@ -1111,7 +1113,7 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
// FIXME rate for compound mode and second intrapred mode
rate = mode_costs[mode];
b->bmi.as_mode.first = mode;
xd->mode_info_context->bmi[ib].as_mode.first = mode;
vp9_intra8x8_predict(xd, b, mode, *(b->base_dst) + b->dst, b->dst_stride);
......@@ -1208,7 +1210,7 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
*best_mode = mode;
}
}
b->bmi.as_mode.first = (*best_mode);
xd->mode_info_context->bmi[ib].as_mode.first = (*best_mode);
vp9_encode_intra8x8(x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
......@@ -1607,7 +1609,6 @@ static int labels2mode(
Ones from this macroblock have to be pulled from the BLOCKD array
as they have not yet made it to the bmi array in our MB_MODE_INFO. */
for (i = 0; i < 16; ++i) {
BLOCKD *const d = xd->block + i;
const int row = i >> 2, col = i & 3;
B_PREDICTION_MODE m;
......@@ -1639,17 +1640,17 @@ static int labels2mode(
}
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.as_mv[0].as_int :
this_mv->as_int = col ? mic->bmi[i - 1].as_mv[0].as_int :
left_block_mv(xd, mic, i);
if (mbmi->second_ref_frame > 0)
this_second_mv->as_int = col ? d[-1].bmi.as_mv[1].as_int :
this_second_mv->as_int = col ? mic->bmi[i - 1].as_mv[1].as_int :
left_block_second_mv(xd, mic, i);
break;
case ABOVE4X4:
this_mv->as_int = row ? d[-4].bmi.as_mv[0].as_int :
this_mv->as_int = row ? mic->bmi[i - 4].as_mv[0].as_int :
above_block_mv(mic, i, mis);
if (mbmi->second_ref_frame > 0)
this_second_mv->as_int = row ? d[-4].bmi.as_mv[1].as_int :
this_second_mv->as_int = row ? mic->bmi[i - 4].as_mv[1].as_int :
above_block_second_mv(mic, i, mis);
break;
case ZERO4X4:
......@@ -1665,10 +1666,10 @@ static int labels2mode(
int_mv left_mv, left_second_mv;
left_second_mv.as_int = 0;
left_mv.as_int = col ? d[-1].bmi.as_mv[0].as_int :
left_mv.as_int = col ? mic->bmi[i - 1].as_mv[0].as_int :
left_block_mv(xd, mic, i);
if (mbmi->second_ref_frame > 0)
left_second_mv.as_int = col ? d[-1].bmi.as_mv[1].as_int :
left_second_mv.as_int = col ? mic->bmi[i - 1].as_mv[1].as_int :
left_block_second_mv(xd, mic, i);
if (left_mv.as_int == this_mv->as_int &&
......@@ -1685,9 +1686,9 @@ static int labels2mode(
#endif
}
d->bmi.as_mv[0].as_int = this_mv->as_int;
mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
if (mbmi->second_ref_frame > 0)
d->bmi.as_mv[1].as_int = this_second_mv->as_int;
mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;
x->partition_info->bmi[i].mode = m;
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
......@@ -1733,7 +1734,7 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm,
xd->plane[0].pre[0].stride,
*(bd->base_dst) + bd->dst,
bd->dst_stride,
&bd->bmi.as_mv[0],
&xd->mode_info_context->bmi[i].as_mv[0],
&xd->scale_factor[0],
4, 4, 0 /* no avg */, &xd->subpix);
......@@ -1748,7 +1749,8 @@ static int64_t encode_inter_mb_segment(VP9_COMMON *const cm,
vp9_build_inter_predictor(
second_pre, xd->plane[0].pre[1].stride,
*(bd->base_dst) + bd->dst, bd->dst_stride,
&bd->bmi.as_mv[1], &xd->scale_factor[1], 4, 4, 1,
&xd->mode_info_context->bmi[i].as_mv[1],
&xd->scale_factor[1], 4, 4, 1,
&xd->subpix);
}
......@@ -1825,7 +1827,8 @@ static int64_t encode_inter_mb_segment_8x8(VP9_COMMON *const cm,
vp9_build_inter_predictor(
pre, xd->plane[0].pre[which_mv].stride,
*(bd->base_dst) + bd->dst, bd->dst_stride,
&bd->bmi.as_mv[which_mv], &xd->scale_factor[which_mv], 8, 8,
&xd->mode_info_context->bmi[ib].as_mv[which_mv],
&xd->scale_factor[which_mv], 8, 8,
which_mv, &xd->subpix);
}
......@@ -2060,9 +2063,11 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
// use previous block's result as next block's MV predictor.
if (segmentation == PARTITIONING_4X4 && i > 0) {
bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv[0].as_int;
bsi->mvp.as_int =
x->e_mbd.mode_info_context->bmi[i - 1].as_mv[0].as_int;
if (i == 4 || i == 8 || i == 12)
bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv[0].as_int;
bsi->mvp.as_int =
x->e_mbd.mode_info_context->bmi[i - 4].as_mv[0].as_int;
step_param = 2;
}
}
......@@ -2106,15 +2111,18 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
thissme = cpi->full_search_sad(x, e, &mvp_full,
sadpb, 16, v_fn_ptr,
x->nmvjointcost, x->mvcost,
bsi->ref_mv);
bsi->ref_mv,
n);
if (thissme < bestsme) {
bestsme = thissme;
mode_mv[NEW4X4].as_int = e->bmi.as_mv[0].as_int;
mode_mv[NEW4X4].as_int =
x->e_mbd.mode_info_context->bmi[n].as_mv[0].as_int;
} else {
/* The full search result is actually worse so re-instate the
* previous best vector */
e->bmi.as_mv[0].as_int = mode_mv[NEW4X4].as_int;
x->e_mbd.mode_info_context->bmi[n].as_mv[0].as_int =
mode_mv[NEW4X4].as_int;
}
}
}
......@@ -2459,11 +2467,10 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
/* set it to the best */
for (i = 0; i < 16; i++) {
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
x->e_mbd.mode_info_context->bmi[i].as_mv[0].as_int = bsi.mvs[i].as_int;
if (mbmi->second_ref_frame > 0)
bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;
x->e_mbd.mode_info_context->bmi[i].as_mv[1].as_int =
bsi.second_mvs[i].as_int;
x->e_mbd.plane[0].eobs[i] = bsi.eobs[i];
}
......@@ -2554,10 +2561,6 @@ static void set_i8x8_block_modes(MACROBLOCK *x, int modes[4]) {
// printf("%d,%d,%d,%d\n",
// modes[0], modes[1], modes[2], modes[3]);
}
for (i = 0; i < 16; i++) {
xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
}
extern void vp9_calc_ref_probs(int *count, vp9_prob *probs);
......@@ -2966,8 +2969,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->nmvjointcost, x->mvcost,
&dis, &sse);
}
d->bmi.as_mv[0].as_int = tmp_mv.as_int;
frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv[0].as_int;
frame_mv[NEWMV][refs[0]].as_int =
xd->mode_info_context->bmi[0].as_mv[0].as_int = tmp_mv.as_int;
// Add the new motion vector cost to our rolling cost variable
*rate2 += vp9_mv_bit_cost(&tmp_mv, &ref_mv[0],
......@@ -3676,7 +3679,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(&tmp_best_partition, x->partition_info,
sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++) {
tmp_best_bmodes[i] = xd->block[i].bmi;
tmp_best_bmodes[i] = xd->mode_info_context->bmi[i];
}
pred_exists = 1;
}
......@@ -3711,7 +3714,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(x->partition_info, &tmp_best_partition,
sizeof(PARTITION_INFO));
for (i = 0; i < 16; i++) {
xd->block[i].bmi = xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
xd->mode_info_context->bmi[i] = tmp_best_bmodes[i];
}
}
......@@ -3920,7 +3923,7 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|| (this_mode == I8X8_PRED)
|| (this_mode == SPLITMV))
for (i = 0; i < 16; i++) {
best_bmodes[i] = xd->block[i].bmi;
best_bmodes[i] = xd->mode_info_context->bmi[i];
}
}
......@@ -4049,7 +4052,6 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (best_mbmode.mode == I4X4_PRED) {
for (i = 0; i < 16; i++) {
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
}
}
......
......@@ -132,6 +132,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
BLOCKD *d = &x->e_mbd.block[0];
int_mv best_ref_mv1;
int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */
int_mv *ref_mv;
// Save input state
struct buf_2d src = x->plane[0].src;
......@@ -158,7 +159,8 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
/*cpi->sf.search_method == HEX*/
// TODO Check that the 16x16 vf & sdf are selected here
// Ignore mv costing by sending NULL pointer instead of cost arrays
bestsme = vp9_hex_search(x, d, &best_ref_mv1_full, &d->bmi.as_mv[0],
ref_mv = &x->e_mbd.mode_info_context->bmi[0].as_mv[0];
bestsme = vp9_hex_search(x, d, &best_ref_mv1_full, ref_mv,
step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
NULL, NULL, NULL, NULL,
&best_ref_mv1);
......@@ -170,7 +172,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
int distortion;
unsigned int sse;
// Ignore mv costing by sending NULL pointer instead of cost array
bestsme = cpi->find_fractional_mv_step(x, d, &d->bmi.as_mv[0],
bestsme = cpi->find_fractional_mv_step(x, d, ref_mv,
&best_ref_mv1,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
......@@ -246,8 +248,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
if (cpi->frames[frame] == NULL)
continue;
mbd->block[0].bmi.as_mv[0].as_mv.row = 0;
mbd->block[0].bmi.as_mv[0].as_mv.col = 0;
mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row = 0;
mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col = 0;
if (frame == alt_ref_index) {
filter_weight = 2;
......@@ -280,8 +282,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
cpi->frames[frame]->u_buffer + mb_uv_offset,
cpi->frames[frame]->v_buffer + mb_uv_offset,
cpi->frames[frame]->y_stride,
mbd->block[0].bmi.as_mv[0].as_mv.row,
mbd->block[0].bmi.as_mv[0].as_mv.col,
mbd->mode_info_context->bmi[0].as_mv[0].as_mv.row,
mbd->mode_info_context->bmi[0].as_mv[0].as_mv.col,
predictor);
// Apply the filter (YUV)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment