Commit e6485581 authored by Ronald S. Bultje's avatar Ronald S. Bultje
Browse files

Remove splitmv.

We leave it in rdopt.c as a local define for now - this can be removed
later. In all other places, we remove it, thereby slightly decreasing
the size of some arrays in the bitstream.

Change-Id: Ic2a9beb97a4eda0b086f62c039d994b192f99ca5
parent 1efa79d3
...@@ -82,12 +82,11 @@ typedef enum { ...@@ -82,12 +82,11 @@ typedef enum {
NEARMV, NEARMV,
ZEROMV, ZEROMV,
NEWMV, NEWMV,
SPLITMV,
MB_MODE_COUNT MB_MODE_COUNT
} MB_PREDICTION_MODE; } MB_PREDICTION_MODE;
static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) { static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
return mode >= NEARESTMV && mode <= SPLITMV; return mode >= NEARESTMV && mode <= NEWMV;
} }
#define INTRA_MODE_COUNT (TM_PRED + 1) #define INTRA_MODE_COUNT (TM_PRED + 1)
...@@ -122,7 +121,7 @@ typedef enum { ...@@ -122,7 +121,7 @@ typedef enum {
#define VP9_UV_MODES (TM_PRED + 1) #define VP9_UV_MODES (TM_PRED + 1)
#define VP9_I32X32_MODES (TM_PRED + 1) #define VP9_I32X32_MODES (TM_PRED + 1)
#define VP9_MVREFS (1 + SPLITMV - NEARESTMV) #define VP9_MVREFS (1 + NEWMV - NEARESTMV)
#define WHT_UPSCALE_FACTOR 2 #define WHT_UPSCALE_FACTOR 2
...@@ -690,7 +689,6 @@ static INLINE void foreach_predicted_block_in_plane( ...@@ -690,7 +689,6 @@ static INLINE void foreach_predicted_block_in_plane(
const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane, const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
foreach_predicted_block_visitor visit, void *arg) { foreach_predicted_block_visitor visit, void *arg) {
int i, x, y; int i, x, y;
const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
// block sizes in number of 4x4 blocks log 2 ("*_b") // block sizes in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
...@@ -701,7 +699,8 @@ static INLINE void foreach_predicted_block_in_plane( ...@@ -701,7 +699,8 @@ static INLINE void foreach_predicted_block_in_plane(
// size of the predictor to use. // size of the predictor to use.
int pred_w, pred_h; int pred_w, pred_h;
if (mode == SPLITMV) { if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
assert(bsize == BLOCK_SIZE_SB8X8);
pred_w = 0; pred_w = 0;
pred_h = 0; pred_h = 0;
} else { } else {
......
...@@ -192,11 +192,6 @@ void vp9_accum_mv_refs(VP9_COMMON *pc, ...@@ -192,11 +192,6 @@ void vp9_accum_mv_refs(VP9_COMMON *pc,
++mv_ref_ct[context][2][0]; ++mv_ref_ct[context][2][0];
} else { } else {
++mv_ref_ct[context][2][1]; ++mv_ref_ct[context][2][1];
if (m == NEWMV) {
++mv_ref_ct[context][3][0];
} else {
++mv_ref_ct[context][3][1];
}
} }
} }
} }
......
...@@ -24,11 +24,10 @@ static void lower_mv_precision(int_mv *mv, int usehp) { ...@@ -24,11 +24,10 @@ static void lower_mv_precision(int_mv *mv, int usehp) {
} }
} }
vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob p[4], int context) { vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob *p, int context) {
p[0] = pc->fc.vp9_mode_contexts[context][0]; p[0] = pc->fc.vp9_mode_contexts[context][0];
p[1] = pc->fc.vp9_mode_contexts[context][1]; p[1] = pc->fc.vp9_mode_contexts[context][1];
p[2] = pc->fc.vp9_mode_contexts[context][2]; p[2] = pc->fc.vp9_mode_contexts[context][2];
p[3] = pc->fc.vp9_mode_contexts[context][3];
return p; return p;
} }
......
...@@ -30,7 +30,6 @@ static void lf_init_lut(loop_filter_info_n *lfi) { ...@@ -30,7 +30,6 @@ static void lf_init_lut(loop_filter_info_n *lfi) {
lfi->mode_lf_lut[NEARESTMV] = 2; lfi->mode_lf_lut[NEARESTMV] = 2;
lfi->mode_lf_lut[NEARMV] = 2; lfi->mode_lf_lut[NEARMV] = 2;
lfi->mode_lf_lut[NEWMV] = 2; lfi->mode_lf_lut[NEWMV] = 2;
lfi->mode_lf_lut[SPLITMV] = 3;
} }
void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi, void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
......
...@@ -220,7 +220,8 @@ void vp9_find_mv_refs_idx(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here, ...@@ -220,7 +220,8 @@ void vp9_find_mv_refs_idx(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
add_candidate_mv(mv_ref_list, candidate_scores, add_candidate_mv(mv_ref_list, candidate_scores,
&refmv_count, c_refmv, 16); &refmv_count, c_refmv, 16);
} }
split_count += (candidate_mi->mbmi.mode == SPLITMV); split_count += (candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 &&
candidate_mi->mbmi.ref_frame != INTRA_FRAME);
// Count number of neighbours coded intra and zeromv  // Count number of neighbours coded intra and zeromv
intra_count += (candidate_mi->mbmi.mode < NEARESTMV); intra_count += (candidate_mi->mbmi.mode < NEARESTMV);
......
...@@ -392,8 +392,10 @@ static void build_inter_predictors(int plane, int block, ...@@ -392,8 +392,10 @@ static void build_inter_predictors(int plane, int block,
assert(x < bw); assert(x < bw);
assert(y < bh); assert(y < bh);
assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_w == bw); assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_h == bh); 4 << pred_w == bw);
assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
4 << pred_h == bh);
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) { for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
// source // source
...@@ -412,7 +414,7 @@ static void build_inter_predictors(int plane, int block, ...@@ -412,7 +414,7 @@ static void build_inter_predictors(int plane, int block,
MV split_chroma_mv; MV split_chroma_mv;
int_mv clamped_mv; int_mv clamped_mv;
if (xd->mode_info_context->mbmi.mode == SPLITMV) { if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
if (plane == 0) { if (plane == 0) {
mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv; mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
} else { } else {
......
...@@ -569,15 +569,12 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -569,15 +569,12 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// If the segment level skip mode enabled // If the segment level skip mode enabled
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) { if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV; mbmi->mode = ZEROMV;
} else { } else if (bsize >= BLOCK_SIZE_SB8X8) {
if (bsize >= BLOCK_SIZE_SB8X8)
mbmi->mode = read_sb_mv_ref(r, mv_ref_p); mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
else
mbmi->mode = SPLITMV;
vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]); vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]);
} }
if (mbmi->mode != ZEROMV) { if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd, vp9_find_best_ref_mvs(xd,
mbmi->ref_mvs[ref_frame], mbmi->ref_mvs[ref_frame],
&nearest, &nearby); &nearest, &nearby);
...@@ -619,7 +616,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -619,7 +616,7 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
second_ref_frame, mbmi->ref_mvs[second_ref_frame], second_ref_frame, mbmi->ref_mvs[second_ref_frame],
cm->ref_frame_sign_bias); cm->ref_frame_sign_bias);
if (mbmi->mode != ZEROMV) { if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
vp9_find_best_ref_mvs(xd, vp9_find_best_ref_mvs(xd,
mbmi->ref_mvs[second_ref_frame], mbmi->ref_mvs[second_ref_frame],
&nearest_second, &nearest_second,
...@@ -627,12 +624,10 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -627,12 +624,10 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
best_mv_second.as_int = mbmi->ref_mvs[second_ref_frame][0].as_int; best_mv_second.as_int = mbmi->ref_mvs[second_ref_frame][0].as_int;
} }
} }
} }
mbmi->uv_mode = DC_PRED; mbmi->uv_mode = DC_PRED;
switch (mbmi->mode) { if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
case SPLITMV:
mbmi->need_to_clamp_mvs = 0; mbmi->need_to_clamp_mvs = 0;
for (idy = 0; idy < 2; idy += bh) { for (idy = 0; idy < 2; idy += bh) {
for (idx = 0; idx < 2; idx += bw) { for (idx = 0; idx < 2; idx += bw) {
...@@ -661,33 +656,33 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -661,33 +656,33 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
decode_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc, decode_mv(r, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
&cm->fc.NMVcount, xd->allow_high_precision_mv); &cm->fc.NMVcount, xd->allow_high_precision_mv);
#ifdef VPX_MODE_COUNT #ifdef VPX_MODE_COUNT
vp9_mv_cont_count[mv_contz][3]++; vp9_mv_cont_count[mv_contz][3]++;
#endif #endif
break; break;
case NEARESTMV: case NEARESTMV:
blockmv.as_int = nearest.as_int; blockmv.as_int = nearest.as_int;
if (mbmi->second_ref_frame > 0) if (mbmi->second_ref_frame > 0)
secondmv.as_int = nearest_second.as_int; secondmv.as_int = nearest_second.as_int;
#ifdef VPX_MODE_COUNT #ifdef VPX_MODE_COUNT
vp9_mv_cont_count[mv_contz][0]++; vp9_mv_cont_count[mv_contz][0]++;
#endif #endif
break; break;
case NEARMV: case NEARMV:
blockmv.as_int = nearby.as_int; blockmv.as_int = nearby.as_int;
if (mbmi->second_ref_frame > 0) if (mbmi->second_ref_frame > 0)
secondmv.as_int = nearby_second.as_int; secondmv.as_int = nearby_second.as_int;
#ifdef VPX_MODE_COUNT #ifdef VPX_MODE_COUNT
vp9_mv_cont_count[mv_contz][1]++; vp9_mv_cont_count[mv_contz][1]++;
#endif #endif
break; break;
case ZEROMV: case ZEROMV:
blockmv.as_int = 0; blockmv.as_int = 0;
if (mbmi->second_ref_frame > 0) if (mbmi->second_ref_frame > 0)
secondmv.as_int = 0; secondmv.as_int = 0;
#ifdef VPX_MODE_COUNT #ifdef VPX_MODE_COUNT
vp9_mv_cont_count[mv_contz][2]++; vp9_mv_cont_count[mv_contz][2]++;
#endif #endif
break; break;
default: default:
break; break;
...@@ -700,13 +695,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -700,13 +695,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
vpx_memcpy(&mi->bmi[j + i * 2], &mi->bmi[j], sizeof(mi->bmi[j])); vpx_memcpy(&mi->bmi[j + i * 2], &mi->bmi[j], sizeof(mi->bmi[j]));
for (i = 1; i < bw; ++i) for (i = 1; i < bw; ++i)
vpx_memcpy(&mi->bmi[j + i], &mi->bmi[j], sizeof(mi->bmi[j])); vpx_memcpy(&mi->bmi[j + i], &mi->bmi[j], sizeof(mi->bmi[j]));
mi->mbmi.mode = blockmode;
} }
} }
mv0->as_int = mi->bmi[3].as_mv[0].as_int; mv0->as_int = mi->bmi[3].as_mv[0].as_int;
mv1->as_int = mi->bmi[3].as_mv[1].as_int; mv1->as_int = mi->bmi[3].as_mv[1].as_int;
break; /* done with SPLITMV */ } else {
switch (mbmi->mode) {
case NEARMV: case NEARMV:
// Clip "next_nearest" so that it does not extend to far out of image // Clip "next_nearest" so that it does not extend to far out of image
assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge, assign_and_clamp_mv(mv0, &nearby, mb_to_left_edge,
...@@ -759,10 +755,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -759,10 +755,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
} }
break; break;
default: default:
;
#if CONFIG_DEBUG #if CONFIG_DEBUG
assert(0); assert(0);
#endif #endif
break;
}
} }
} else { } else {
// required for left and above block mv // required for left and above block mv
......
...@@ -512,7 +512,7 @@ static void pack_mb_tokens(vp9_writer* const bc, ...@@ -512,7 +512,7 @@ static void pack_mb_tokens(vp9_writer* const bc,
static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m, static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
const vp9_prob *p) { const vp9_prob *p) {
#if CONFIG_DEBUG #if CONFIG_DEBUG
assert(NEARESTMV <= m && m < SPLITMV); assert(NEARESTMV <= m && m <= NEWMV);
#endif #endif
write_token(bc, vp9_sb_mv_ref_tree, p, write_token(bc, vp9_sb_mv_ref_tree, p,
vp9_sb_mv_ref_encoding_array - NEARESTMV + m); vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
...@@ -717,12 +717,12 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, ...@@ -717,12 +717,12 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// If segment skip is not enabled code the mode. // If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) { if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
if (mi->sb_type >= BLOCK_SIZE_SB8X8) if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p); write_sb_mv_ref(bc, mode, mv_ref_p);
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]); vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
} }
}
if (is_inter_mode(mode)) {
if (cpi->common.mcomp_filter_type == SWITCHABLE) { if (cpi->common.mcomp_filter_type == SWITCHABLE) {
write_token(bc, vp9_switchable_interp_tree, write_token(bc, vp9_switchable_interp_tree,
vp9_get_pred_probs(&cpi->common, xd, vp9_get_pred_probs(&cpi->common, xd,
...@@ -732,7 +732,6 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, ...@@ -732,7 +732,6 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
} else { } else {
assert(mi->interp_filter == cpi->common.mcomp_filter_type); assert(mi->interp_filter == cpi->common.mcomp_filter_type);
} }
}
// does the feature use compound prediction or not // does the feature use compound prediction or not
// (if not specified at the frame/segment level) // (if not specified at the frame/segment level)
...@@ -741,21 +740,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, ...@@ -741,21 +740,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_get_pred_prob(pc, xd, PRED_COMP)); vp9_get_pred_prob(pc, xd, PRED_COMP));
} }
switch (mode) { /* new, split require MVs */ if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
case NEWMV:
#ifdef ENTROPY_STATS
active_section = 5;
#endif
vp9_encode_mv(bc,
&mi->mv[0].as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
if (mi->second_ref_frame > 0)
vp9_encode_mv(bc,
&mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
break;
case SPLITMV: {
int j; int j;
MB_PREDICTION_MODE blockmode; MB_PREDICTION_MODE blockmode;
int_mv blockmv; int_mv blockmv;
...@@ -788,10 +773,18 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, ...@@ -788,10 +773,18 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
#ifdef MODE_STATS #ifdef MODE_STATS
++count_mb_seg[mi->partitioning]; ++count_mb_seg[mi->partitioning];
#endif #endif
break; } else if (mode == NEWMV) {
} #ifdef ENTROPY_STATS
default: active_section = 5;
break; #endif
vp9_encode_mv(bc,
&mi->mv[0].as_mv, &mi->best_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
if (mi->second_ref_frame > 0)
vp9_encode_mv(bc,
&mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
nmvc, xd->allow_high_precision_mv);
} }
} }
} }
......
...@@ -332,7 +332,9 @@ static void update_state(VP9_COMP *cpi, ...@@ -332,7 +332,9 @@ static void update_state(VP9_COMP *cpi,
MACROBLOCKD *const xd = &x->e_mbd; MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic; MODE_INFO *mi = &ctx->mic;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi; MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
int mb_mode = mi->mbmi.mode; #if CONFIG_DEBUG || CONFIG_INTERNAL_STATS
MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
#endif
int mb_mode_index = ctx->best_mode_index; int mb_mode_index = ctx->best_mode_index;
const int mis = cpi->common.mode_info_stride; const int mis = cpi->common.mode_info_stride;
const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize); const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
...@@ -362,7 +364,8 @@ static void update_state(VP9_COMP *cpi, ...@@ -362,7 +364,8 @@ static void update_state(VP9_COMP *cpi,
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16]; ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
} }
if (mb_mode == SPLITMV) { if (mbmi->ref_frame != INTRA_FRAME &&
mbmi->sb_type < BLOCK_SIZE_SB8X8) {
vpx_memcpy(x->partition_info, &ctx->partition_info, vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO)); sizeof(PARTITION_INFO));
...@@ -448,7 +451,8 @@ static void update_state(VP9_COMP *cpi, ...@@ -448,7 +451,8 @@ static void update_state(VP9_COMP *cpi,
*/ */
// Note how often each mode chosen as best // Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++; cpi->mode_chosen_counts[mb_mode_index]++;
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) { if (mbmi->ref_frame != INTRA_FRAME &&
(mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
int_mv best_mv, best_second_mv; int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame; MV_REFERENCE_FRAME rf = mbmi->ref_frame;
best_mv.as_int = ctx->best_ref_mv.as_int; best_mv.as_int = ctx->best_ref_mv.as_int;
...@@ -1617,7 +1621,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, ...@@ -1617,7 +1621,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (mbmi->mode == SPLITMV) { } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST; cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
} else { } else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST; cpi->zbin_mode_boost = MV_ZBIN_BOOST;
......
...@@ -50,6 +50,7 @@ DECLARE_ALIGNED(16, extern const uint8_t, ...@@ -50,6 +50,7 @@ DECLARE_ALIGNED(16, extern const uint8_t,
vp9_pt_energy_class[MAX_ENTROPY_TOKENS]); vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
#define I4X4_PRED 0x8000 #define I4X4_PRED 0x8000
#define SPLITMV 0x10000
const MODE_DEFINITION vp9_mode_order[MAX_MODES] = { const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
{ZEROMV, LAST_FRAME, NONE}, {ZEROMV, LAST_FRAME, NONE},
...@@ -988,7 +989,7 @@ int vp9_cost_mv_ref(VP9_COMP *cpi, ...@@ -988,7 +989,7 @@ int vp9_cost_mv_ref(VP9_COMP *cpi,
VP9_COMMON *pc = &cpi->common; VP9_COMMON *pc = &cpi->common;
vp9_prob p [VP9_MVREFS - 1]; vp9_prob p [VP9_MVREFS - 1];
assert(NEARESTMV <= m && m <= SPLITMV); assert(NEARESTMV <= m && m <= NEWMV);
vp9_mv_ref_probs(pc, p, mode_context); vp9_mv_ref_probs(pc, p, mode_context);
return cost_token(vp9_sb_mv_ref_tree, p, return cost_token(vp9_sb_mv_ref_tree, p,
vp9_sb_mv_ref_encoding_array - NEARESTMV + m); vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
...@@ -1536,6 +1537,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x, ...@@ -1536,6 +1537,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
*returndistortion = bsi.d; *returndistortion = bsi.d;
*returnyrate = bsi.segment_yrate; *returnyrate = bsi.segment_yrate;
*skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8); *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
mbmi->mode = bsi.modes[3];
return (int)(bsi.segment_rd); return (int)(bsi.segment_rd);
} }
...@@ -2896,7 +2898,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ...@@ -2896,7 +2898,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
compmode_cost = compmode_cost =
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred); vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
mbmi->mode = this_mode;
} else { } else {
YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL}; YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
int fb = get_ref_frame_idx(cpi, mbmi->ref_frame); int fb = get_ref_frame_idx(cpi, mbmi->ref_frame);
...@@ -2991,7 +2992,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ...@@ -2991,7 +2992,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
best_mode = this_mode; best_mode = this_mode;
} }
if (this_mode != I4X4_PRED) { if (this_mode != I4X4_PRED && this_mode != SPLITMV) {
// Store the respective mode distortions for later use. // Store the respective mode distortions for later use.
if (mode_distortions[this_mode] == -1 if (mode_distortions[this_mode] == -1
|| distortion2 < mode_distortions[this_mode]) { || distortion2 < mode_distortions[this_mode]) {
...@@ -3107,8 +3108,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ...@@ -3107,8 +3108,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// Flag all modes that have a distortion thats > 2x the best we found at // Flag all modes that have a distortion thats > 2x the best we found at
// this level. // this level.
for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) { for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) {
if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV)
|| mode_index == SPLITMV)
continue; continue;
if (mode_distortions[mode_index] > 2 * *returndistortion) { if (mode_distortions[mode_index] > 2 * *returndistortion) {
...@@ -3191,7 +3191,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ...@@ -3191,7 +3191,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
} }
} }
if (best_mbmode.mode == SPLITMV) { if (best_mbmode.ref_frame != INTRA_FRAME &&
best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
xd->mode_info_context->bmi[i].as_mv[0].as_int = xd->mode_info_context->bmi[i].as_mv[0].as_int =
best_bmodes[i].as_mv[0].as_int; best_bmodes[i].as_mv[0].as_int;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment