Commit e6485581 authored by Ronald S. Bultje

Remove splitmv.

We leave it in rdopt.c as a local define for now; this can be removed
later. In all other places, we remove it, thereby slightly decreasing
the size of some arrays in the bitstream.

Change-Id: Ic2a9beb97a4eda0b086f62c039d994b192f99ca5
parent 1efa79d3
@@ -82,12 +82,11 @@ typedef enum {
   NEARMV,
   ZEROMV,
   NEWMV,
-  SPLITMV,
   MB_MODE_COUNT
 } MB_PREDICTION_MODE;
 
 static INLINE int is_inter_mode(MB_PREDICTION_MODE mode) {
-  return mode >= NEARESTMV && mode <= SPLITMV;
+  return mode >= NEARESTMV && mode <= NEWMV;
 }
 
 #define INTRA_MODE_COUNT (TM_PRED + 1)
@@ -122,7 +121,7 @@ typedef enum {
 #define VP9_UV_MODES (TM_PRED + 1)
 #define VP9_I32X32_MODES (TM_PRED + 1)
 
-#define VP9_MVREFS (1 + SPLITMV - NEARESTMV)
+#define VP9_MVREFS (1 + NEWMV - NEARESTMV)
 
 #define WHT_UPSCALE_FACTOR 2
@@ -690,7 +689,6 @@ static INLINE void foreach_predicted_block_in_plane(
     const MACROBLOCKD* const xd, BLOCK_SIZE_TYPE bsize, int plane,
     foreach_predicted_block_visitor visit, void *arg) {
   int i, x, y;
-  const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
 
   // block sizes in number of 4x4 blocks log 2 ("*_b")
   // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
@@ -701,7 +699,8 @@ static INLINE void foreach_predicted_block_in_plane(
   // size of the predictor to use.
   int pred_w, pred_h;
 
-  if (mode == SPLITMV) {
+  if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+    assert(bsize == BLOCK_SIZE_SB8X8);
     pred_w = 0;
     pred_h = 0;
   } else {
@@ -192,11 +192,6 @@ void vp9_accum_mv_refs(VP9_COMMON *pc,
       ++mv_ref_ct[context][2][0];
     } else {
       ++mv_ref_ct[context][2][1];
-      if (m == NEWMV) {
-        ++mv_ref_ct[context][3][0];
-      } else {
-        ++mv_ref_ct[context][3][1];
-      }
     }
   }
 }
@@ -24,11 +24,10 @@ static void lower_mv_precision(int_mv *mv, int usehp) {
   }
 }
 
-vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob p[4], int context) {
+vp9_prob *vp9_mv_ref_probs(VP9_COMMON *pc, vp9_prob *p, int context) {
   p[0] = pc->fc.vp9_mode_contexts[context][0];
   p[1] = pc->fc.vp9_mode_contexts[context][1];
   p[2] = pc->fc.vp9_mode_contexts[context][2];
-  p[3] = pc->fc.vp9_mode_contexts[context][3];
   return p;
 }
@@ -30,7 +30,6 @@ static void lf_init_lut(loop_filter_info_n *lfi) {
   lfi->mode_lf_lut[NEARESTMV] = 2;
   lfi->mode_lf_lut[NEARMV] = 2;
   lfi->mode_lf_lut[NEWMV] = 2;
-  lfi->mode_lf_lut[SPLITMV] = 3;
 }
 
 void vp9_loop_filter_update_sharpness(loop_filter_info_n *lfi,
@@ -220,7 +220,8 @@ void vp9_find_mv_refs_idx(VP9_COMMON *cm, MACROBLOCKD *xd, MODE_INFO *here,
       add_candidate_mv(mv_ref_list, candidate_scores,
                        &refmv_count, c_refmv, 16);
     }
-    split_count += (candidate_mi->mbmi.mode == SPLITMV);
+    split_count += (candidate_mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 &&
+                    candidate_mi->mbmi.ref_frame != INTRA_FRAME);
 
     // Count number of neighbours coded intra and zeromv
     intra_count += (candidate_mi->mbmi.mode < NEARESTMV);
@@ -392,8 +392,10 @@ static void build_inter_predictors(int plane, int block,
   assert(x < bw);
   assert(y < bh);
-  assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_w == bw);
-  assert(xd->mode_info_context->mbmi.mode == SPLITMV || 4 << pred_h == bh);
+  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+         4 << pred_w == bw);
+  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
+         4 << pred_h == bh);
 
   for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
     // source
@@ -412,7 +414,7 @@ static void build_inter_predictors(int plane, int block,
       MV split_chroma_mv;
       int_mv clamped_mv;
 
-      if (xd->mode_info_context->mbmi.mode == SPLITMV) {
+      if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
         if (plane == 0) {
           mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
         } else {
......
This diff is collapsed.
@@ -512,7 +512,7 @@ static void pack_mb_tokens(vp9_writer* const bc,
 static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
                             const vp9_prob *p) {
 #if CONFIG_DEBUG
-  assert(NEARESTMV <= m && m < SPLITMV);
+  assert(NEARESTMV <= m && m <= NEWMV);
 #endif
   write_token(bc, vp9_sb_mv_ref_tree, p,
               vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
@@ -717,21 +717,20 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
       // If segment skip is not enabled code the mode.
       if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
-        if (mi->sb_type >= BLOCK_SIZE_SB8X8)
+        if (mi->sb_type >= BLOCK_SIZE_SB8X8) {
           write_sb_mv_ref(bc, mode, mv_ref_p);
-        vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
+          vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
+        }
       }
 
       if (is_inter_mode(mode)) {
-      if (cpi->common.mcomp_filter_type == SWITCHABLE) {
-        write_token(bc, vp9_switchable_interp_tree,
-                    vp9_get_pred_probs(&cpi->common, xd,
-                                       PRED_SWITCHABLE_INTERP),
-                    vp9_switchable_interp_encodings +
-                    vp9_switchable_interp_map[mi->interp_filter]);
-      } else {
-        assert(mi->interp_filter == cpi->common.mcomp_filter_type);
-      }
+        if (cpi->common.mcomp_filter_type == SWITCHABLE) {
+          write_token(bc, vp9_switchable_interp_tree,
+                      vp9_get_pred_probs(&cpi->common, xd,
+                                         PRED_SWITCHABLE_INTERP),
+                      vp9_switchable_interp_encodings +
+                      vp9_switchable_interp_map[mi->interp_filter]);
+        } else {
+          assert(mi->interp_filter == cpi->common.mcomp_filter_type);
+        }
 
       // does the feature use compound prediction or not
@@ -741,57 +740,51 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
-      switch (mode) { /* new, split require MVs */
-        case NEWMV:
-#ifdef ENTROPY_STATS
-          active_section = 5;
-#endif
-          vp9_encode_mv(bc,
-                        &mi->mv[0].as_mv, &mi->best_mv.as_mv,
-                        nmvc, xd->allow_high_precision_mv);
-          if (mi->second_ref_frame > 0)
-            vp9_encode_mv(bc,
-                          &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
-                          nmvc, xd->allow_high_precision_mv);
-          break;
-        case SPLITMV: {
-          int j;
-          MB_PREDICTION_MODE blockmode;
-          int_mv blockmv;
-          int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
-          int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
-          int idx, idy;
-          for (idy = 0; idy < 2; idy += bh) {
-            for (idx = 0; idx < 2; idx += bw) {
-              j = idy * 2 + idx;
-              blockmode = cpi->mb.partition_info->bmi[j].mode;
-              blockmv = cpi->mb.partition_info->bmi[j].mv;
-              write_sb_mv_ref(bc, blockmode, mv_ref_p);
-              vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
-              if (blockmode == NEWMV) {
-#ifdef ENTROPY_STATS
-                active_section = 11;
-#endif
-                vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
-                              nmvc, xd->allow_high_precision_mv);
-                if (mi->second_ref_frame > 0)
-                  vp9_encode_mv(bc,
-                                &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
-                                &mi->best_second_mv.as_mv,
-                                nmvc, xd->allow_high_precision_mv);
-              }
-            }
-          }
-#ifdef MODE_STATS
-          ++count_mb_seg[mi->partitioning];
-#endif
-          break;
-        }
-        default:
-          break;
-      }
+      if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+        int j;
+        MB_PREDICTION_MODE blockmode;
+        int_mv blockmv;
+        int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
+        int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
+        int idx, idy;
+        for (idy = 0; idy < 2; idy += bh) {
+          for (idx = 0; idx < 2; idx += bw) {
+            j = idy * 2 + idx;
+            blockmode = cpi->mb.partition_info->bmi[j].mode;
+            blockmv = cpi->mb.partition_info->bmi[j].mv;
+            write_sb_mv_ref(bc, blockmode, mv_ref_p);
+            vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
+            if (blockmode == NEWMV) {
+#ifdef ENTROPY_STATS
+              active_section = 11;
+#endif
+              vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
+                            nmvc, xd->allow_high_precision_mv);
+              if (mi->second_ref_frame > 0)
+                vp9_encode_mv(bc,
+                              &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
+                              &mi->best_second_mv.as_mv,
+                              nmvc, xd->allow_high_precision_mv);
+            }
+          }
+        }
+#ifdef MODE_STATS
+        ++count_mb_seg[mi->partitioning];
+#endif
+      } else if (mode == NEWMV) {
+#ifdef ENTROPY_STATS
+        active_section = 5;
+#endif
+        vp9_encode_mv(bc,
+                      &mi->mv[0].as_mv, &mi->best_mv.as_mv,
+                      nmvc, xd->allow_high_precision_mv);
+        if (mi->second_ref_frame > 0)
+          vp9_encode_mv(bc,
+                        &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
+                        nmvc, xd->allow_high_precision_mv);
+      }
     }
   }
@@ -332,7 +332,9 @@ static void update_state(VP9_COMP *cpi,
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO *mi = &ctx->mic;
   MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
-  int mb_mode = mi->mbmi.mode;
+#if CONFIG_DEBUG || CONFIG_INTERNAL_STATS
+  MB_PREDICTION_MODE mb_mode = mi->mbmi.mode;
+#endif
   int mb_mode_index = ctx->best_mode_index;
   const int mis = cpi->common.mode_info_stride;
   const int bh = 1 << mi_height_log2(bsize), bw = 1 << mi_width_log2(bsize);
@@ -362,7 +364,8 @@ static void update_state(VP9_COMP *cpi,
     ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
   }
 
-  if (mb_mode == SPLITMV) {
+  if (mbmi->ref_frame != INTRA_FRAME &&
+      mbmi->sb_type < BLOCK_SIZE_SB8X8) {
     vpx_memcpy(x->partition_info, &ctx->partition_info,
                sizeof(PARTITION_INFO));
@@ -448,7 +451,8 @@ static void update_state(VP9_COMP *cpi,
   */
   // Note how often each mode chosen as best
   cpi->mode_chosen_counts[mb_mode_index]++;
-  if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
+  if (mbmi->ref_frame != INTRA_FRAME &&
+      (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
     int_mv best_mv, best_second_mv;
     MV_REFERENCE_FRAME rf = mbmi->ref_frame;
     best_mv.as_int = ctx->best_ref_mv.as_int;
@@ -1617,7 +1621,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
         cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
       else
         cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
-    } else if (mbmi->mode == SPLITMV) {
+    } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
       cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
     } else {
       cpi->zbin_mode_boost = MV_ZBIN_BOOST;
@@ -50,6 +50,7 @@ DECLARE_ALIGNED(16, extern const uint8_t,
                 vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
 
 #define I4X4_PRED 0x8000
+#define SPLITMV 0x10000
 
 const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
   {ZEROMV, LAST_FRAME, NONE},
@@ -988,7 +989,7 @@ int vp9_cost_mv_ref(VP9_COMP *cpi,
   VP9_COMMON *pc = &cpi->common;
   vp9_prob p[VP9_MVREFS - 1];
 
-  assert(NEARESTMV <= m && m <= SPLITMV);
+  assert(NEARESTMV <= m && m <= NEWMV);
   vp9_mv_ref_probs(pc, p, mode_context);
   return cost_token(vp9_sb_mv_ref_tree, p,
                     vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
@@ -1536,6 +1537,7 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
   *returndistortion = bsi.d;
   *returnyrate = bsi.segment_yrate;
   *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+  mbmi->mode = bsi.modes[3];
 
   return (int)(bsi.segment_rd);
 }
@@ -2896,7 +2898,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
       compmode_cost =
           vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
-      mbmi->mode = this_mode;
     } else {
       YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL};
       int fb = get_ref_frame_idx(cpi, mbmi->ref_frame);
@@ -2991,7 +2992,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
         best_mode = this_mode;
       }
 
-      if (this_mode != I4X4_PRED) {
+      if (this_mode != I4X4_PRED && this_mode != SPLITMV) {
         // Store the respective mode distortions for later use.
         if (mode_distortions[this_mode] == -1
             || distortion2 < mode_distortions[this_mode]) {
@@ -3107,8 +3108,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   // Flag all modes that have a distortion that's > 2x the best we found at
   // this level.
   for (mode_index = 0; mode_index < MB_MODE_COUNT; ++mode_index) {
-    if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV
-        || mode_index == SPLITMV)
+    if (mode_index == NEARESTMV || mode_index == NEARMV || mode_index == NEWMV)
       continue;
 
     if (mode_distortions[mode_index] > 2 * *returndistortion) {
@@ -3191,7 +3191,8 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     }
   }
 
-  if (best_mbmode.mode == SPLITMV) {
+  if (best_mbmode.ref_frame != INTRA_FRAME &&
+      best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
     for (i = 0; i < 4; i++)
       xd->mode_info_context->bmi[i].as_mv[0].as_int =
           best_bmodes[i].as_mv[0].as_int;