Commit 6fdc853e authored by Sarah Parker

Refactor ext-inter to loop through all masked modes in rdopt

No change in performance

Change-Id: Ie105a7baf6a2c2258d3ef117e727957e4393f51b
parent 0c628e64
......@@ -163,6 +163,12 @@ static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
mode == NEAREST_NEWMV || mode == NEW_NEARESTMV ||
mode == NEAR_NEWMV || mode == NEW_NEARMV);
}
// TODO(sarahparker) this will eventually be extended when more
// masked compound types are added
// Returns nonzero iff |type| is a compound prediction that applies a mask.
static INLINE int is_masked_compound_type(COMPOUND_TYPE type) {
  switch (type) {
    case COMPOUND_WEDGE: return 1;
    default: return 0;
  }
}
#else
static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
......@@ -232,6 +238,15 @@ typedef struct RD_STATS {
#endif // CONFIG_RD_DEBUG
} RD_STATS;
#if CONFIG_EXT_INTER
// Parameters describing the masked (inter-inter) compound prediction mode
// chosen for a block.
typedef struct {
  COMPOUND_TYPE type;  // Which compound prediction type is in use.
  int wedge_index;     // Wedge pattern selector; used when type == COMPOUND_WEDGE.
  int wedge_sign;      // Wedge orientation flag; used when type == COMPOUND_WEDGE.
  // TODO(sarahparker) add necessary data for segmentation compound type
} INTERINTER_COMPOUND_DATA;
#endif // CONFIG_EXT_INTER
// This structure now relates to 8x8 block regions.
typedef struct {
// Common for both INTER and INTRA blocks
......@@ -282,9 +297,7 @@ typedef struct {
int use_wedge_interintra;
int interintra_wedge_index;
int interintra_wedge_sign;
COMPOUND_TYPE interinter_compound;
int interinter_wedge_index;
int interinter_wedge_sign;
INTERINTER_COMPOUND_DATA interinter_compound_data;
#endif // CONFIG_EXT_INTER
MOTION_MODE motion_mode;
int_mv mv[2];
......
......@@ -1910,10 +1910,9 @@ void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
}
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interinter_wedge_used(i))
aom_tree_merge_probs(
av1_compound_type_tree, pre_fc->compound_type_prob[i],
counts->compound_interinter[i], fc->compound_type_prob[i]);
aom_tree_merge_probs(av1_compound_type_tree, pre_fc->compound_type_prob[i],
counts->compound_interinter[i],
fc->compound_type_prob[i]);
}
#endif // CONFIG_EXT_INTER
......
......@@ -251,6 +251,24 @@ const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
return mask;
}
// get a mask according to the compound type
// TODO(sarahparker) this needs to be extended for other experiments and
// is currently only intended for ext_inter alone
#if CONFIG_EXT_INTER
// Returns the blend mask for the masked compound type described by
// |comp_data|. |invert| flips the wedge sign so the caller can obtain the
// complementary mask. Only valid for masked compound types.
const uint8_t *av1_get_compound_type_mask(
    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type,
    int invert) {
  assert(is_masked_compound_type(comp_data->type));
  if (comp_data->type == COMPOUND_WEDGE) {
    const int sign =
        invert ? !comp_data->wedge_sign : comp_data->wedge_sign;
    return av1_get_contiguous_soft_mask(comp_data->wedge_index, sign, sb_type);
  }
  assert(0);
  return NULL;
}
#endif // CONFIG_EXT_INTER
static void init_wedge_master_masks() {
int i, j, s;
const int w = MASK_MASTER_SIZE;
......@@ -378,17 +396,16 @@ static void build_masked_compound_wedge_extend_highbd(
#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_SUPERTX
static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
const uint8_t *src0, int src0_stride,
const uint8_t *src1, int src1_stride,
int wedge_index, int wedge_sign,
BLOCK_SIZE sb_type, int h, int w) {
// Blends the two single-reference predictions |src0| and |src1| into |dst|
// using the (non-inverted) mask selected by |comp_data|.
static void build_masked_compound(
    uint8_t *dst, int dst_stride, const uint8_t *src0, int src0_stride,
    const uint8_t *src1, int src1_stride,
    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type, int h,
    int w) {
  const uint8_t *const mask = av1_get_compound_type_mask(comp_data, sb_type, 0);
  // Derive subsampling from h and w passed in. May be refactored to
  // pass in subsampling factors directly.
  const int sub_y = (2 << b_height_log2_lookup[sb_type]) == h;
  const int sub_x = (2 << b_width_log2_lookup[sb_type]) == w;
  aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
                     mask, block_size_wide[sb_type], h, w, sub_y, sub_x);
}
......@@ -402,8 +419,7 @@ static void build_masked_compound_wedge_highbd(
// pass in subsampling factors directly.
const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask =
av1_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
const uint8_t *mask = av1_get_compound_type_mask(comp_data, sb_type, 0);
aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
src1_stride, mask, block_size_wide[sb_type], h, w,
subh, subw, bd);
......@@ -426,6 +442,8 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
#endif // CONFIG_SUPERTX
const MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
const INTERINTER_COMPOUND_DATA *const comp_data =
&mi->mbmi.interinter_compound_data;
// The prediction filter types used here should be those for
// the second reference block.
#if CONFIG_DUAL_FILTER
......@@ -446,39 +464,35 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_extend_highbd(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index, mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
comp_data->wedge_index, comp_data->wedge_sign, mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w, xd->bd);
else
build_masked_compound_wedge_extend(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index, mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w);
comp_data->wedge_index, comp_data->wedge_sign, mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_highbd(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index, mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, h, w, xd->bd);
comp_data->wedge_index, comp_data->wedge_sign, mi->mbmi.sb_type, h, w,
xd->bd);
else
build_masked_compound_wedge(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, h, w);
build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
#else // CONFIG_AOM_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, tmp_dst[MAX_SB_SQUARE]);
av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
subpel_y, sf, w, h, 0, tmp_ipf, xs, ys, xd);
#if CONFIG_SUPERTX
build_masked_compound_wedge_extend(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.interinter_wedge_index, mi->mbmi.interinter_wedge_sign,
mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w);
build_masked_compound_wedge_extend(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, comp_data->wedge_index,
comp_data->wedge_sign, mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
build_masked_compound_wedge(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, mi->mbmi.interinter_wedge_index,
mi->mbmi.interinter_wedge_sign, mi->mbmi.sb_type,
h, w);
build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
comp_data, mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
#endif // CONFIG_AOM_HIGHBITDEPTH
}
......@@ -630,8 +644,8 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
(scaled_mv.col >> SUBPEL_BITS);
#if CONFIG_EXT_INTER
if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
mi->mbmi.interinter_compound == COMPOUND_WEDGE)
if (ref &&
is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
av1_make_masked_inter_predictor(
pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
sf, w, h, mi->mbmi.interp_filter, xs, ys,
......@@ -696,8 +710,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
(scaled_mv.col >> SUBPEL_BITS);
#if CONFIG_EXT_INTER
if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
mi->mbmi.interinter_compound == COMPOUND_WEDGE)
if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
av1_make_masked_inter_predictor(pre, pre_buf->stride, dst,
dst_buf->stride, subpel_x, subpel_y, sf,
w, h, mi->mbmi.interp_filter, xs, ys,
......@@ -1280,9 +1293,9 @@ void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
void modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi) {
if (is_interintra_pred(mbmi)) {
mbmi->ref_frame[1] = NONE;
} else if (has_second_ref(mbmi) && is_interinter_wedge_used(mbmi->sb_type) &&
mbmi->interinter_compound == COMPOUND_WEDGE) {
mbmi->interinter_compound = COMPOUND_AVERAGE;
} else if (has_second_ref(mbmi) &&
is_masked_compound_type(mbmi->interinter_compound_data.type)) {
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
mbmi->ref_frame[1] = NONE;
}
return;
......@@ -2080,22 +2093,22 @@ static void build_wedge_inter_predictor_from_buf(
MACROBLOCKD_PLANE *const pd = &xd->plane[plane];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
const INTERINTER_COMPOUND_DATA *const comp_data =
&mbmi->interinter_compound_data;
if (is_compound && is_interinter_wedge_used(mbmi->sb_type) &&
mbmi->interinter_compound == COMPOUND_WEDGE) {
if (is_compound &&
is_masked_compound_type(mbmi->interinter_compound_data.type)) {
#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_highbd(
dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1,
mbmi->interinter_wedge_index, mbmi->interinter_wedge_sign,
mbmi->sb_type, h, w, xd->bd);
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data->wedge_index,
comp_data->wedge_sign, mbmi->sb_type, h, w, xd->bd);
else
#endif // CONFIG_AOM_HIGHBITDEPTH
build_masked_compound_wedge(
dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
ext_dst_stride1, mbmi->interinter_wedge_index,
mbmi->interinter_wedge_sign, mbmi->sb_type, h, w);
build_masked_compound(dst, dst_buf->stride, ext_dst0, ext_dst_stride0,
ext_dst1, ext_dst_stride1, comp_data, mbmi->sb_type,
h, w);
} else {
#if CONFIG_AOM_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
......
......@@ -522,6 +522,10 @@ const uint8_t *av1_get_soft_mask(int wedge_index, int wedge_sign,
BLOCK_SIZE sb_type, int wedge_offset_x,
int wedge_offset_y);
const uint8_t *av1_get_compound_type_mask(
const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type,
int invert);
void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
uint8_t *upred, uint8_t *vpred,
int ystride, int ustride, int vstride,
......
......@@ -4327,10 +4327,8 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interinter_wedge_used(i)) {
for (j = 0; j < COMPOUND_TYPES - 1; j++) {
av1_diff_update_prob(&r, &fc->compound_type_prob[i][j], ACCT_STR);
}
for (j = 0; j < COMPOUND_TYPES - 1; j++) {
av1_diff_update_prob(&r, &fc->compound_type_prob[i][j], ACCT_STR);
}
}
}
......
......@@ -1816,21 +1816,22 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if CONFIG_EXT_INTER
mbmi->interinter_compound = COMPOUND_AVERAGE;
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
if (cm->reference_mode != SINGLE_REFERENCE &&
is_inter_compound_mode(mbmi->mode) &&
is_inter_compound_mode(mbmi->mode)
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
mbmi->motion_mode == SIMPLE_TRANSLATION &&
&& mbmi->motion_mode == SIMPLE_TRANSLATION
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
is_interinter_wedge_used(bsize)) {
mbmi->interinter_compound = aom_read_tree(
) {
mbmi->interinter_compound_data.type = aom_read_tree(
r, av1_compound_type_tree, cm->fc->compound_type_prob[bsize], ACCT_STR);
if (xd->counts)
xd->counts->compound_interinter[bsize][mbmi->interinter_compound]++;
if (mbmi->interinter_compound == COMPOUND_WEDGE) {
mbmi->interinter_wedge_index =
xd->counts->compound_interinter[bsize]
[mbmi->interinter_compound_data.type]++;
if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
mbmi->interinter_compound_data.wedge_index =
aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
mbmi->interinter_wedge_sign = aom_read_bit(r, ACCT_STR);
mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
}
}
#endif // CONFIG_EXT_INTER
......
......@@ -1599,18 +1599,18 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
#if CONFIG_EXT_INTER
if (cpi->common.reference_mode != SINGLE_REFERENCE &&
is_inter_compound_mode(mbmi->mode) &&
is_inter_compound_mode(mbmi->mode)
#if CONFIG_MOTION_VAR
mbmi->motion_mode == SIMPLE_TRANSLATION &&
&& mbmi->motion_mode == SIMPLE_TRANSLATION
#endif // CONFIG_MOTION_VAR
is_interinter_wedge_used(bsize)) {
av1_write_token(w, av1_compound_type_tree,
cm->fc->compound_type_prob[bsize],
&compound_type_encodings[mbmi->interinter_compound]);
if (mbmi->interinter_compound == COMPOUND_WEDGE) {
aom_write_literal(w, mbmi->interinter_wedge_index,
) {
av1_write_token(
w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
&compound_type_encodings[mbmi->interinter_compound_data.type]);
if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
aom_write_literal(w, mbmi->interinter_compound_data.wedge_index,
get_wedge_bits_lookup(bsize));
aom_write_bit(w, mbmi->interinter_wedge_sign);
aom_write_bit(w, mbmi->interinter_compound_data.wedge_sign);
}
}
#endif // CONFIG_EXT_INTER
......@@ -4232,10 +4232,9 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
}
if (cm->reference_mode != SINGLE_REFERENCE) {
for (i = 0; i < BLOCK_SIZES; i++)
if (is_interinter_wedge_used(i))
prob_diff_update(av1_compound_type_tree, fc->compound_type_prob[i],
cm->counts.compound_interinter[i], COMPOUND_TYPES,
probwt, header_bc);
prob_diff_update(av1_compound_type_tree, fc->compound_type_prob[i],
cm->counts.compound_interinter[i], COMPOUND_TYPES,
probwt, header_bc);
}
#endif // CONFIG_EXT_INTER
......
......@@ -1992,12 +1992,13 @@ static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row,
#if CONFIG_EXT_INTER
if (cm->reference_mode != SINGLE_REFERENCE &&
is_inter_compound_mode(mbmi->mode) &&
is_inter_compound_mode(mbmi->mode)
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
mbmi->motion_mode == SIMPLE_TRANSLATION &&
&& mbmi->motion_mode == SIMPLE_TRANSLATION
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
is_interinter_wedge_used(bsize)) {
counts->compound_interinter[bsize][mbmi->interinter_compound]++;
) {
counts->compound_interinter[bsize]
[mbmi->interinter_compound_data.type]++;
}
#endif // CONFIG_EXT_INTER
}
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment