Commit 88fe7871 authored by Debargha Mukherjee's avatar Debargha Mukherjee
Browse files

Refactor wedge generation

Change-Id: I2ec4f562e28a4673477e20186f9d6167b24b76b8
parent cf3ee225
......@@ -53,31 +53,41 @@ static INLINE int is_inter_mode(PREDICTION_MODE mode) {
}
#if CONFIG_EXT_INTER
#define WEDGE_BITS_SML 3
#define WEDGE_BITS_MED 4
#define WEDGE_BITS_BIG 5
#define WEDGE_BITS_SML 2
#define WEDGE_BITS_MED 3
#define WEDGE_BITS_BIG 4
#define WEDGE_NONE -1
#define WEDGE_WEIGHT_BITS 6
static INLINE int get_wedge_bits(BLOCK_SIZE sb_type) {
if (sb_type < BLOCK_8X8)
return 0;
if (sb_type <= BLOCK_8X8)
return WEDGE_BITS_SML;
else if (sb_type <= BLOCK_32X32)
return WEDGE_BITS_MED;
else
return WEDGE_BITS_BIG;
}
// Per-block-size count of bits used to code a wedge index, indexed by
// BLOCK_SIZE. Replaces the branchy get_wedge_bits() helper with a table.
// A value of 0 means wedge prediction is unavailable at that size
// (the first three entries, i.e. the sub-8x8 block sizes).
// NOTE(review): entry order must match the BLOCK_SIZE enum, including the
// three extra sizes added under CONFIG_EXT_PARTITION — confirm against the
// enum definition if BLOCK_SIZES ever changes.
static const int get_wedge_bits_lookup[BLOCK_SIZES] = {
0,
0,
0,
WEDGE_BITS_SML,
WEDGE_BITS_MED,
WEDGE_BITS_MED,
WEDGE_BITS_MED,
WEDGE_BITS_MED,
WEDGE_BITS_MED,
WEDGE_BITS_MED,
WEDGE_BITS_BIG,
WEDGE_BITS_BIG,
WEDGE_BITS_BIG,
#if CONFIG_EXT_PARTITION
WEDGE_BITS_BIG,
WEDGE_BITS_BIG,
WEDGE_BITS_BIG,
#endif // CONFIG_EXT_PARTITION
};
static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
(void) sb_type;
return get_wedge_bits(sb_type) > 0;
return get_wedge_bits_lookup[sb_type] > 0;
}
static INLINE int is_interintra_wedge_used(BLOCK_SIZE sb_type) {
(void) sb_type;
return 0; // get_wedge_bits(sb_type) > 0;
return get_wedge_bits_lookup[sb_type] > 0;
}
static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
......@@ -205,9 +215,10 @@ typedef struct {
// TODO(debargha): Consolidate these flags
int use_wedge_interintra;
int interintra_wedge_index;
int interintra_uv_wedge_index;
int interintra_wedge_sign;
int use_wedge_interinter;
int interinter_wedge_index;
int interinter_wedge_sign;
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
......
This diff is collapsed.
......@@ -410,6 +410,7 @@ void vp10_build_prediction_by_left_preds(VP10_COMMON *cm,
void vp10_init_wedge_masks();
const uint8_t *vp10_get_soft_mask(int wedge_index,
int wedge_sign,
BLOCK_SIZE sb_type,
int h, int w);
......
......@@ -1566,8 +1566,8 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index =
mbmi->interintra_uv_wedge_index =
vp10_read_literal(r, get_wedge_bits(bsize));
vp10_read_literal(r, get_wedge_bits_lookup[bsize]);
mbmi->interintra_wedge_sign = 0;
}
}
}
......@@ -1599,7 +1599,8 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
xd->counts->wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->interinter_wedge_index =
vp10_read_literal(r, get_wedge_bits(bsize));
vp10_read_literal(r, get_wedge_bits_lookup[bsize]);
mbmi->interinter_wedge_sign = vp10_read_bit(r);
}
}
#endif // CONFIG_EXT_INTER
......
......@@ -1316,7 +1316,8 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
cm->fc->wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
vp10_write_literal(w, mbmi->interintra_wedge_index,
get_wedge_bits(bsize));
get_wedge_bits_lookup[bsize]);
assert(mbmi->interintra_wedge_sign == 0);
}
}
}
......@@ -1343,9 +1344,11 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
is_interinter_wedge_used(bsize)) {
vp10_write(w, mbmi->use_wedge_interinter,
cm->fc->wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter)
if (mbmi->use_wedge_interinter) {
vp10_write_literal(w, mbmi->interinter_wedge_index,
get_wedge_bits(bsize));
get_wedge_bits_lookup[bsize]);
vp10_write_bit(w, mbmi->interinter_wedge_sign);
}
}
#endif // CONFIG_EXT_INTER
......
......@@ -6108,6 +6108,7 @@ static void do_masked_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
int wedge_index,
int wedge_sign,
BLOCK_SIZE bsize,
int mi_row, int mi_col,
int_mv *tmp_mv, int *rate_mv,
......@@ -6121,7 +6122,7 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
int h = (4 << b_height_log2_lookup[sb_type]);
const uint8_t *mask;
const int mask_stride = MASK_MASTER_STRIDE;
mask = vp10_get_soft_mask(wedge_index, sb_type, h, w);
mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, h, w);
if (which == 0 || which == 2)
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
......@@ -6130,7 +6131,7 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
if (which == 1 || which == 2) {
// get the negative mask
mask = vp10_get_soft_mask(wedge_index ^ 1, sb_type, h, w);
mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, h, w);
do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
mi_row, mi_col, &tmp_mv[1], &rate_mv[1],
1, mv_idx[1]);
......@@ -6790,9 +6791,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
best_rd_nowedge = rd;
mbmi->use_wedge_interinter = 1;
rs = get_wedge_bits(bsize) * 256 +
rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
wedge_types = (1 << get_wedge_bits(bsize));
wedge_types = (1 << get_wedge_bits_lookup[bsize]);
if (have_newmv_in_inter_mode(this_mode)) {
int_mv tmp_mv[2];
int rate_mvs[2], tmp_rate_mv = 0;
......@@ -6810,8 +6811,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
vp10_build_inter_predictors_for_planes_single_buf(
xd, bsize, mi_row, mi_col, 1, preds1, strides);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->interinter_wedge_index = wedge_index;
for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
mbmi->interinter_wedge_index = wedge_index >> 1;
mbmi->interinter_wedge_sign = wedge_index & 1;
vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
preds0, strides,
preds1, strides);
......@@ -6823,10 +6825,13 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
best_rd_wedge = rd;
}
}
mbmi->interinter_wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index >> 1;
mbmi->interinter_wedge_sign = best_wedge_index & 1;
if (this_mode == NEW_NEWMV) {
int mv_idxs[2] = {0, 0};
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
do_masked_motion_search_indexed(cpi, x,
mbmi->interinter_wedge_index,
mbmi->interinter_wedge_sign,
bsize, mi_row, mi_col, tmp_mv, rate_mvs,
mv_idxs, 2);
tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
......@@ -6834,14 +6839,18 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
mbmi->mv[1].as_int = tmp_mv[1].as_int;
} else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
int mv_idxs[2] = {0, 0};
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
do_masked_motion_search_indexed(cpi, x,
mbmi->interinter_wedge_index,
mbmi->interinter_wedge_sign,
bsize, mi_row, mi_col, tmp_mv, rate_mvs,
mv_idxs, 0);
tmp_rate_mv = rate_mvs[0];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
int mv_idxs[2] = {0, 0};
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
do_masked_motion_search_indexed(cpi, x,
mbmi->interinter_wedge_index,
mbmi->interinter_wedge_sign,
bsize, mi_row, mi_col, tmp_mv, rate_mvs,
mv_idxs, 1);
tmp_rate_mv = rate_mvs[1];
......@@ -6860,7 +6869,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
}
if (best_rd_wedge < best_rd_nowedge) {
mbmi->use_wedge_interinter = 1;
mbmi->interinter_wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index >> 1;
mbmi->interinter_wedge_sign = best_wedge_index & 1;
xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
*rate2 += tmp_rate_mv - rate_mv;
......@@ -6884,8 +6894,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
xd, bsize, mi_row, mi_col, 0, preds0, strides);
vp10_build_inter_predictors_for_planes_single_buf(
xd, bsize, mi_row, mi_col, 1, preds1, strides);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->interinter_wedge_index = wedge_index;
for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
mbmi->interinter_wedge_index = wedge_index >> 1;
mbmi->interinter_wedge_sign = wedge_index & 1;
vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
preds0, strides,
preds1, strides);
......@@ -6899,7 +6910,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
}
if (best_rd_wedge < best_rd_nowedge) {
mbmi->use_wedge_interinter = 1;
mbmi->interinter_wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index >> 1;
mbmi->interinter_wedge_sign = best_wedge_index & 1;
} else {
mbmi->use_wedge_interinter = 0;
}
......@@ -6911,7 +6923,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
pred_exists = 0;
tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
if (mbmi->use_wedge_interinter)
*compmode_wedge_cost = get_wedge_bits(bsize) * 256 +
*compmode_wedge_cost = (1 + get_wedge_bits_lookup[bsize]) * 256 +
vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
else
*compmode_wedge_cost =
......@@ -6924,7 +6936,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
int rmode, rate_sum;
int64_t dist_sum;
int j;
int wedge_bits, wedge_types, wedge_index, best_wedge_index = -1;
int wedge_types, wedge_index, best_wedge_index = -1;
int64_t best_interintra_rd_nowedge = INT64_MAX;
int64_t best_interintra_rd_wedge = INT64_MAX;
int rwedge;
......@@ -6992,7 +7004,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
rmode = interintra_mode_cost[mbmi->interintra_mode];
if (is_interintra_wedge_used(bsize)) {
wedge_bits = get_wedge_bits(bsize);
vp10_combine_interintra(xd, bsize, 0, tmp_buf, MAX_SB_SIZE,
intrapred, MAX_SB_SIZE);
vp10_combine_interintra(xd, bsize, 1,
......@@ -7009,12 +7020,12 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
best_interintra_rd_nowedge = rd;
mbmi->use_wedge_interintra = 1;
rwedge = wedge_bits * 256 +
wedge_types = (1 << get_wedge_bits_lookup[bsize]);
rwedge = get_wedge_bits_lookup[bsize] * 256 +
vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
wedge_types = (1 << wedge_bits);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->interintra_wedge_index = wedge_index;
mbmi->interintra_uv_wedge_index = wedge_index;
mbmi->interintra_wedge_sign = 0;
vp10_combine_interintra(xd, bsize, 0,
tmp_buf, MAX_SB_SIZE,
intrapred, MAX_SB_SIZE);
......@@ -7037,9 +7048,9 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
if (have_newmv_in_inter_mode(this_mode)) {
// get negative of mask
const uint8_t* mask = vp10_get_soft_mask(
best_wedge_index ^ 1, bsize, bh, bw);
best_wedge_index, 1, bsize, bh, bw);
mbmi->interintra_wedge_index = best_wedge_index;
mbmi->interintra_uv_wedge_index = best_wedge_index;
mbmi->interintra_wedge_sign = 0;
do_masked_motion_search(cpi, x, mask, MASK_MASTER_STRIDE, bsize,
mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
0, mv_idx);
......@@ -7062,7 +7073,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
mbmi->use_wedge_interintra = 1;
mbmi->interintra_wedge_index = best_wedge_index;
mbmi->interintra_uv_wedge_index = best_wedge_index;
mbmi->interintra_wedge_sign = 0;
best_interintra_rd = best_interintra_rd_wedge;
mbmi->mv[0].as_int = tmp_mv.as_int;
*rate2 += tmp_rate_mv - rate_mv;
......@@ -7083,7 +7094,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
*compmode_interintra_cost += vp10_cost_bit(
cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
if (mbmi->use_wedge_interintra) {
*compmode_interintra_cost += get_wedge_bits(bsize) * 256;
*compmode_interintra_cost += get_wedge_bits_lookup[bsize] * 256;
}
}
} else if (is_interintra_allowed(mbmi)) {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment