Commit 2cf73eb8 authored by Sarah Parker

Move compound segment mask buffer from mbmi to xd

This resolves crashes on awcy due to excessive memory use: the
aligned 2 * MAX_SB_SQUARE seg_mask buffer previously lived inside
every MB_MODE_INFO and now lives once in the MACROBLOCKD. No change
in BD-rate.

Change-Id: If3e67683dee6658db16dd5c1d686111e6415c493
parent 47433999
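
For a sense of the scale involved, a minimal back-of-the-envelope
sketch (the 2 * MAX_SB_SQUARE sizing comes from the diff below; the
superblock size and mi-grid density are illustrative assumptions, not
measurements from awcy):

#include <stdio.h>

/* Rough arithmetic behind the fix. Assumes MAX_SB_SQUARE = 128 * 128
 * (CONFIG_EXT_PARTITION) and one MB_MODE_INFO per 8x8 mi unit of a
 * 4K frame -- both are assumptions for illustration only. */
int main(void) {
  const long long mask_bytes = 2LL * 128 * 128;       /* 32 KiB per buffer */
  const long long mi_units = (3840 / 8) * (2160 / 8); /* ~130k mi units */
  printf("one seg_mask buffer:            %lld KiB\n", mask_bytes / 1024);
  printf("embedded in every MB_MODE_INFO: ~%lld MiB\n",
         mask_bytes * mi_units / (1024 * 1024));
  printf("moved into MACROBLOCKD:         %lld KiB total\n",
         mask_bytes / 1024);
  return 0;
}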
@@ -322,14 +322,18 @@ typedef struct RD_STATS {
} RD_STATS;
#if CONFIG_EXT_INTER
// This struct is used to group function args that are commonly
// passed together to functions related to interinter compound modes
typedef struct {
COMPOUND_TYPE type;
#if CONFIG_WEDGE
int wedge_index;
int wedge_sign;
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
SEG_MASK_TYPE mask_type;
DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
uint8_t *seg_mask;
#endif // CONFIG_COMPOUND_SEGMENT
COMPOUND_TYPE interinter_compound_type;
} INTERINTER_COMPOUND_DATA;
#endif // CONFIG_EXT_INTER
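
With seg_mask now a plain pointer, the struct can be assembled cheaply
on the stack and pointed at the single buffer owned by the MACROBLOCKD,
which is the pattern the reconinter.c and rdopt.c hunks below adopt. A
minimal sketch of that pattern (assuming the usual av1 headers for the
types; the function name is hypothetical):

/* Gather the per-block fields plus the shared mask buffer. comp_data
 * borrows xd->seg_mask; it does not own or copy it. */
static void build_comp_data_sketch(MACROBLOCKD *xd) {
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const INTERINTER_COMPOUND_DATA comp_data = {
#if CONFIG_WEDGE
    mbmi->wedge_index,
    mbmi->wedge_sign,
#endif  // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
    mbmi->mask_type,
    xd->seg_mask, /* shared scratch owned by xd */
#endif  // CONFIG_COMPOUND_SEGMENT
    mbmi->interinter_compound_type
  };
  (void)comp_data; /* would be passed by const pointer to mask helpers */
}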
@@ -387,12 +391,21 @@ typedef struct {
#endif // CONFIG_EXT_INTRA
#if CONFIG_EXT_INTER
// interintra members
INTERINTRA_MODE interintra_mode;
// TODO(debargha): Consolidate these flags
int use_wedge_interintra;
int interintra_wedge_index;
int interintra_wedge_sign;
INTERINTER_COMPOUND_DATA interinter_compound_data;
// interinter members
COMPOUND_TYPE interinter_compound_type;
#if CONFIG_WEDGE
int wedge_index;
int wedge_sign;
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
SEG_MASK_TYPE mask_type;
#endif // CONFIG_COMPOUND_SEGMENT
#endif // CONFIG_EXT_INTER
MOTION_MODE motion_mode;
#if CONFIG_MOTION_VAR
@@ -653,6 +666,10 @@ typedef struct macroblockd {
const EobThresholdMD *eob_threshold_md;
#endif
#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SEGMENT
DECLARE_ALIGNED(16, uint8_t, seg_mask[2 * MAX_SB_SQUARE]);
#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SEGMENT
#if CONFIG_CFL
CFL_CTX *cfl;
#endif
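
The moved declaration keeps its 16-byte alignment. For reference,
DECLARE_ALIGNED is the compiler-portability wrapper from
aom_ports/mem.h, approximately:

#if defined(_MSC_VER)
#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val
#else
#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))
#endif
/* So the member above expands (gcc/clang) to:
 *   uint8_t seg_mask[2 * MAX_SB_SQUARE] __attribute__((aligned(16)));
 * keeping the buffer usable for aligned SIMD loads in the mask kernels. */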
......
@@ -268,9 +268,9 @@ const uint8_t *av1_get_compound_type_mask_inverse(
uint8_t *mask_buffer, int h, int w, int stride,
#endif
BLOCK_SIZE sb_type) {
assert(is_masked_compound_type(comp_data->type));
assert(is_masked_compound_type(comp_data->interinter_compound_type));
(void)sb_type;
switch (comp_data->type) {
switch (comp_data->interinter_compound_type) {
#if CONFIG_WEDGE
case COMPOUND_WEDGE:
return av1_get_contiguous_soft_mask(comp_data->wedge_index,
@@ -286,9 +286,9 @@ const uint8_t *av1_get_compound_type_mask_inverse(
const uint8_t *av1_get_compound_type_mask(
const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type) {
assert(is_masked_compound_type(comp_data->type));
assert(is_masked_compound_type(comp_data->interinter_compound_type));
(void)sb_type;
switch (comp_data->type) {
switch (comp_data->interinter_compound_type) {
#if CONFIG_WEDGE
case COMPOUND_WEDGE:
return av1_get_contiguous_soft_mask(comp_data->wedge_index,
@@ -596,7 +596,7 @@ static void build_masked_compound_wedge_extend(
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask;
size_t mask_stride;
switch (comp_data->type) {
switch (comp_data->interinter_compound_type) {
case COMPOUND_WEDGE:
mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
sb_type, wedge_offset_x, wedge_offset_y);
@@ -624,7 +624,7 @@ static void build_masked_compound_wedge_extend_highbd(
const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
const uint8_t *mask;
size_t mask_stride;
switch (comp_data->type) {
switch (comp_data->interinter_compound_type) {
case COMPOUND_WEDGE:
mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
sb_type, wedge_offset_x, wedge_offset_y);
@@ -699,7 +699,17 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
#endif // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
MACROBLOCKD *xd) {
MODE_INFO *mi = xd->mi[0];
INTERINTER_COMPOUND_DATA *comp_data = &mi->mbmi.interinter_compound_data;
const INTERINTER_COMPOUND_DATA comp_data = {
#if CONFIG_WEDGE
mi->mbmi.wedge_index,
mi->mbmi.wedge_sign,
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
mi->mbmi.mask_type,
xd->seg_mask,
#endif // CONFIG_COMPOUND_SEGMENT
mi->mbmi.interinter_compound_type
};
// The prediction filter types used here should be those for
// the second reference block.
#if CONFIG_DUAL_FILTER
@@ -726,13 +736,13 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
#endif
xs, ys, xd);
#if CONFIG_COMPOUND_SEGMENT
if (!plane && comp_data->type == COMPOUND_SEG) {
if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG) {
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_compound_seg_mask_highbd(comp_data->seg_mask, comp_data->mask_type,
build_compound_seg_mask_highbd(comp_data.seg_mask, comp_data.mask_type,
dst, dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.sb_type, h, w, xd->bd);
else
build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, dst,
build_compound_seg_mask(comp_data.seg_mask, comp_data.mask_type, dst,
dst_stride, tmp_dst, MAX_SB_SIZE,
mi->mbmi.sb_type, h, w);
}
@@ -741,20 +751,20 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
#if CONFIG_SUPERTX
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_extend_highbd(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data,
mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
else
build_masked_compound_wedge_extend(
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data,
mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w);
#else
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_highbd(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w,
xd->bd);
MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h,
w, xd->bd);
else
build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w);
MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
#else // CONFIG_HIGHBITDEPTH
@@ -769,18 +779,18 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
#endif
xs, ys, xd);
#if CONFIG_COMPOUND_SEGMENT
if (!plane && comp_data->type == COMPOUND_SEG)
build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, dst,
if (!plane && comp_data.interinter_compound_type == COMPOUND_SEG)
build_compound_seg_mask(comp_data.seg_mask, comp_data.mask_type, dst,
dst_stride, tmp_dst, MAX_SB_SIZE, mi->mbmi.sb_type,
h, w);
#endif // CONFIG_COMPOUND_SEGMENT
#if CONFIG_SUPERTX
build_masked_compound_wedge_extend(dst, dst_stride, dst, dst_stride, tmp_dst,
MAX_SB_SIZE, comp_data, mi->mbmi.sb_type,
MAX_SB_SIZE, &comp_data, mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
comp_data, mi->mbmi.sb_type, h, w);
&comp_data, mi->mbmi.sb_type, h, w);
#endif // CONFIG_SUPERTX
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_COMPOUND_SEGMENT
@@ -995,8 +1005,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
(scaled_mv.col >> SUBPEL_BITS);
#if CONFIG_EXT_INTER
if (ref &&
is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type))
av1_make_masked_inter_predictor(
pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
sf, w, h, mi->mbmi.interp_filter, xs, ys,
@@ -1118,8 +1127,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
#endif // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION
conv_params.ref = ref;
#if CONFIG_EXT_INTER
if (ref &&
is_masked_compound_type(mi->mbmi.interinter_compound_data.type))
if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type))
av1_make_masked_inter_predictor(
pre[ref], pre_buf->stride, dst, dst_buf->stride,
subpel_params[ref].subpel_x, subpel_params[ref].subpel_y, sf, w, h,
@@ -1876,8 +1884,8 @@ void modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi) {
if (is_interintra_pred(mbmi)) {
mbmi->ref_frame[1] = NONE_FRAME;
} else if (has_second_ref(mbmi) &&
is_masked_compound_type(mbmi->interinter_compound_data.type)) {
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
is_masked_compound_type(mbmi->interinter_compound_type)) {
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
mbmi->ref_frame[1] = NONE_FRAME;
}
#endif // CONFIG_EXT_INTER
@@ -2964,16 +2972,25 @@ static void build_wedge_inter_predictor_from_buf(
MACROBLOCKD_PLANE *const pd = &xd->plane[plane];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
INTERINTER_COMPOUND_DATA *comp_data = &mbmi->interinter_compound_data;
const INTERINTER_COMPOUND_DATA comp_data = {
#if CONFIG_WEDGE
mbmi->wedge_index,
mbmi->wedge_sign,
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
mbmi->mask_type,
xd->seg_mask,
#endif // CONFIG_COMPOUND_SEGMENT
mbmi->interinter_compound_type
};
if (is_compound &&
is_masked_compound_type(mbmi->interinter_compound_data.type)) {
if (is_compound && is_masked_compound_type(mbmi->interinter_compound_type)) {
#if CONFIG_COMPOUND_SEGMENT
if (!plane && comp_data->type == COMPOUND_SEG)
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_compound_seg_mask_highbd(
comp_data->seg_mask, comp_data->mask_type,
comp_data.seg_mask, comp_data.mask_type,
CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, mbmi->sb_type, h, w,
xd->bd);
@@ -2989,26 +3006,26 @@ static void build_wedge_inter_predictor_from_buf(
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_wedge_extend_highbd(
dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, &comp_data,
mbmi->sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
else
#endif // CONFIG_HIGHBITDEPTH
build_masked_compound_wedge_extend(
dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
ext_dst_stride1, comp_data, mbmi->sb_type, wedge_offset_x,
ext_dst_stride1, &comp_data, mbmi->sb_type, wedge_offset_x,
wedge_offset_y, h, w);
#else
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
build_masked_compound_highbd(
dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data,
CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, &comp_data,
mbmi->sb_type, h, w, xd->bd);
else
#endif // CONFIG_HIGHBITDEPTH
build_masked_compound(dst, dst_buf->stride, ext_dst0, ext_dst_stride0,
ext_dst1, ext_dst_stride1, comp_data, mbmi->sb_type,
h, w);
ext_dst1, ext_dst_stride1, &comp_data,
mbmi->sb_type, h, w);
#endif // CONFIG_SUPERTX
} else {
#if CONFIG_HIGHBITDEPTH
......
@@ -2201,7 +2201,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if CONFIG_EXT_INTER
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
if (cm->reference_mode != SINGLE_REFERENCE &&
is_inter_compound_mode(mbmi->mode)
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
@@ -2210,29 +2210,27 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
) {
if (is_any_masked_compound_used(bsize)) {
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
mbmi->interinter_compound_data.type =
mbmi->interinter_compound_type =
aom_read_tree(r, av1_compound_type_tree,
cm->fc->compound_type_prob[bsize], ACCT_STR);
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
#if CONFIG_WEDGE
if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
mbmi->interinter_compound_data.wedge_index =
if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
mbmi->wedge_index =
aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
mbmi->wedge_sign = aom_read_bit(r, ACCT_STR);
}
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
mbmi->interinter_compound_data.mask_type =
aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
if (mbmi->interinter_compound_type == COMPOUND_SEG) {
mbmi->mask_type = aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
}
#endif // CONFIG_COMPOUND_SEGMENT
} else {
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
}
if (xd->counts)
xd->counts
->compound_interinter[bsize][mbmi->interinter_compound_data.type]++;
xd->counts->compound_interinter[bsize][mbmi->interinter_compound_type]++;
}
#endif // CONFIG_EXT_INTER
......
@@ -2072,21 +2072,19 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row,
#endif // CONFIG_MOTION_VAR
&& is_any_masked_compound_used(bsize)) {
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
av1_write_token(
w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
&compound_type_encodings[mbmi->interinter_compound_data.type]);
av1_write_token(w, av1_compound_type_tree,
cm->fc->compound_type_prob[bsize],
&compound_type_encodings[mbmi->interinter_compound_type]);
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
#if CONFIG_WEDGE
if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
aom_write_literal(w, mbmi->interinter_compound_data.wedge_index,
get_wedge_bits_lookup(bsize));
aom_write_bit(w, mbmi->interinter_compound_data.wedge_sign);
if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
aom_write_literal(w, mbmi->wedge_index, get_wedge_bits_lookup(bsize));
aom_write_bit(w, mbmi->wedge_sign);
}
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
aom_write_literal(w, mbmi->interinter_compound_data.mask_type,
MAX_SEG_MASK_BITS);
if (mbmi->interinter_compound_type == COMPOUND_SEG) {
aom_write_literal(w, mbmi->mask_type, MAX_SEG_MASK_BITS);
}
#endif // CONFIG_COMPOUND_SEGMENT
}
......
@@ -2233,8 +2233,7 @@ static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row,
&& mbmi->motion_mode == SIMPLE_TRANSLATION
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
) {
counts->compound_interinter[bsize]
[mbmi->interinter_compound_data.type]++;
counts->compound_interinter[bsize][mbmi->interinter_compound_type]++;
}
#endif // CONFIG_EXT_INTER
}
......
@@ -7562,11 +7562,11 @@ static int64_t pick_wedge_fixed_sign(
}
static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
const MACROBLOCK *const x,
MACROBLOCK *const x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1) {
const MACROBLOCKD *const xd = &x->e_mbd;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const int bw = block_size_wide[bsize];
@@ -7583,19 +7583,18 @@ static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
rd = pick_wedge(cpi, x, bsize, p0, p1, &wedge_sign, &wedge_index);
}
mbmi->interinter_compound_data.wedge_sign = wedge_sign;
mbmi->interinter_compound_data.wedge_index = wedge_index;
mbmi->wedge_sign = wedge_sign;
mbmi->wedge_index = wedge_index;
return rd;
}
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
static int64_t pick_interinter_seg(const AV1_COMP *const cpi,
const MACROBLOCK *const x,
const BLOCK_SIZE bsize,
MACROBLOCK *const x, const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1) {
const MACROBLOCKD *const xd = &x->e_mbd;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct buf_2d *const src = &x->plane[0].src;
const int bw = block_size_wide[bsize];
@@ -7614,7 +7613,6 @@ static int64_t pick_interinter_seg(const AV1_COMP *const cpi,
#else
const int bd_round = 0;
#endif // CONFIG_HIGHBITDEPTH
INTERINTER_COMPOUND_DATA *comp_data = &mbmi->interinter_compound_data;
DECLARE_ALIGNED(32, int16_t, r0[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int16_t, r1[MAX_SB_SQUARE]);
DECLARE_ALIGNED(32, int16_t, d10[MAX_SB_SQUARE]);
@@ -7641,15 +7639,15 @@ static int64_t pick_interinter_seg(const AV1_COMP *const cpi,
#if CONFIG_HIGHBITDEPTH
if (hbd)
build_compound_seg_mask_highbd(
comp_data->seg_mask, cur_mask_type, CONVERT_TO_BYTEPTR(p0), bw,
xd->seg_mask, cur_mask_type, CONVERT_TO_BYTEPTR(p0), bw,
CONVERT_TO_BYTEPTR(p1), bw, bsize, bh, bw, xd->bd);
else
#endif // CONFIG_HIGHBITDEPTH
build_compound_seg_mask(comp_data->seg_mask, cur_mask_type, p0, bw, p1,
bw, bsize, bh, bw);
build_compound_seg_mask(xd->seg_mask, cur_mask_type, p0, bw, p1, bw,
bsize, bh, bw);
// compute rd for mask
sse = av1_wedge_sse_from_residuals(r1, d10, comp_data->seg_mask, N);
sse = av1_wedge_sse_from_residuals(r1, d10, xd->seg_mask, N);
sse = ROUND_POWER_OF_TWO(sse, bd_round);
model_rd_from_sse(cpi, xd, bsize, 0, sse, &rate, &dist);
@@ -7662,16 +7660,16 @@ static int64_t pick_interinter_seg(const AV1_COMP *const cpi,
}
// make final mask
comp_data->mask_type = best_mask_type;
mbmi->mask_type = best_mask_type;
#if CONFIG_HIGHBITDEPTH
if (hbd)
build_compound_seg_mask_highbd(
comp_data->seg_mask, comp_data->mask_type, CONVERT_TO_BYTEPTR(p0), bw,
xd->seg_mask, mbmi->mask_type, CONVERT_TO_BYTEPTR(p0), bw,
CONVERT_TO_BYTEPTR(p1), bw, bsize, bh, bw, xd->bd);
else
#endif // CONFIG_HIGHBITDEPTH
build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, p0, bw,
p1, bw, bsize, bh, bw);
build_compound_seg_mask(xd->seg_mask, mbmi->mask_type, p0, bw, p1, bw,
bsize, bh, bw);
return best_rd;
}
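
Condensed, the search above now works entirely out of xd->seg_mask:
every candidate mask type overwrites the same shared buffer, so the
winning mask must be rebuilt once the loop has picked it (the loop
leaves the last candidate tried, not the best one, in the buffer). A
sketch of the control flow; masked_rd_estimate is a hypothetical
stand-in for the residual/SSE rate-distortion modelling in the real
function:

SEG_MASK_TYPE cur_mask_type;
SEG_MASK_TYPE best_mask_type = 0;
int64_t best_rd = INT64_MAX;
for (cur_mask_type = 0; cur_mask_type < SEG_MASK_TYPES; cur_mask_type++) {
  /* Each candidate overwrites the single shared buffer. */
  build_compound_seg_mask(xd->seg_mask, cur_mask_type, p0, bw, p1, bw,
                          bsize, bh, bw);
  const int64_t rd0 = masked_rd_estimate(cpi, xd, bsize, xd->seg_mask);
  if (rd0 < best_rd) {
    best_rd = rd0;
    best_mask_type = cur_mask_type;
  }
}
mbmi->mask_type = best_mask_type;
/* Rebuild the winner into the shared buffer for later prediction. */
build_compound_seg_mask(xd->seg_mask, mbmi->mask_type, p0, bw, p1, bw,
                        bsize, bh, bw);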
@@ -7700,13 +7698,12 @@ static int64_t pick_interintra_wedge(const AV1_COMP *const cpi,
#endif // CONFIG_WEDGE && CONFIG_INTERINTRA
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
static int64_t pick_interinter_mask(const AV1_COMP *const cpi,
const MACROBLOCK *const x,
static int64_t pick_interinter_mask(const AV1_COMP *const cpi, MACROBLOCK *x,
const BLOCK_SIZE bsize,
const uint8_t *const p0,
const uint8_t *const p1) {
const COMPOUND_TYPE compound_type =
x->e_mbd.mi[0]->mbmi.interinter_compound_data.type;
x->e_mbd.mi[0]->mbmi.interinter_compound_type;
switch (compound_type) {
#if CONFIG_WEDGE
case COMPOUND_WEDGE: return pick_interinter_wedge(cpi, x, bsize, p0, p1);
@@ -7723,24 +7720,35 @@ static int interinter_compound_motion_search(const AV1_COMP *const cpi,
const BLOCK_SIZE bsize,
const int this_mode, int mi_row,
int mi_col) {
const MACROBLOCKD *const xd = &x->e_mbd;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int_mv tmp_mv[2];
int rate_mvs[2], tmp_rate_mv = 0;
const INTERINTER_COMPOUND_DATA compound_data = {
#if CONFIG_WEDGE
mbmi->wedge_index,
mbmi->wedge_sign,
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
mbmi->mask_type,
xd->seg_mask,
#endif // CONFIG_COMPOUND_SEGMENT
mbmi->interinter_compound_type
};
if (this_mode == NEW_NEWMV) {
do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
bsize, mi_row, mi_col, tmp_mv, rate_mvs, 2);
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 2);
tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
mbmi->mv[1].as_int = tmp_mv[1].as_int;
} else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
bsize, mi_row, mi_col, tmp_mv, rate_mvs, 0);
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 0);
tmp_rate_mv = rate_mvs[0];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
do_masked_motion_search_indexed(cpi, x, &mbmi->interinter_compound_data,
bsize, mi_row, mi_col, tmp_mv, rate_mvs, 1);
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 1);
tmp_rate_mv = rate_mvs[1];
mbmi->mv[1].as_int = tmp_mv[1].as_int;
}
@@ -7760,7 +7768,7 @@ static int64_t build_and_cost_compound_type(
int64_t rd = INT64_MAX;
int tmp_skip_txfm_sb;
int64_t tmp_skip_sse_sb;
const COMPOUND_TYPE compound_type = mbmi->interinter_compound_data.type;
const COMPOUND_TYPE compound_type = mbmi->interinter_compound_type;
best_rd_cur = pick_interinter_mask(cpi, x, bsize, *preds0, *preds1);
best_rd_cur += RDCOST(x->rdmult, x->rddiv, rs2 + rate_mv, 0);
@@ -8488,7 +8496,7 @@ static int64_t handle_inter_mode(
*args->compmode_interintra_cost = 0;
mbmi->use_wedge_interintra = 0;
*args->compmode_interinter_cost = 0;
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
// is_comp_interintra_pred implies !is_comp_pred
assert(!is_comp_interintra_pred || (!is_comp_pred));
@@ -8710,7 +8718,13 @@ static int64_t handle_inter_mode(
best_mv[0].as_int = cur_mv[0].as_int;
best_mv[1].as_int = cur_mv[1].as_int;
memset(&best_compound_data, 0, sizeof(INTERINTER_COMPOUND_DATA));
memset(&best_compound_data, 0, sizeof(best_compound_data));
#if CONFIG_COMPOUND_SEGMENT
uint8_t tmp_mask_buf[2 * MAX_SB_SQUARE];
best_compound_data.seg_mask = tmp_mask_buf;
#endif // CONFIG_COMPOUND_SEGMENT
av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
av1_compound_type_tree);
if (masked_compound_used) {
av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
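
The point of tmp_mask_buf above: xd->seg_mask is shared scratch that
every candidate compound type overwrites during the search, so the
running best needs its own snapshot storage. A condensed sketch of the
save/restore implemented in this and the two hunks below
(try_compound_type is a hypothetical stand-in for the per-type search):

uint8_t tmp_mask_buf[2 * MAX_SB_SQUARE]; /* stack-backed snapshot */
INTERINTER_COMPOUND_DATA best_compound_data;
memset(&best_compound_data, 0, sizeof(best_compound_data));
best_compound_data.seg_mask = tmp_mask_buf;
int64_t best_rd_compound = INT64_MAX;
COMPOUND_TYPE cur_type;
for (cur_type = COMPOUND_AVERAGE; cur_type < COMPOUND_TYPES; cur_type++) {
  const int64_t rd = try_compound_type(cpi, x, cur_type); /* hypothetical */
  if (rd < best_rd_compound) { /* snapshot the winning mask */
    best_rd_compound = rd;
    memcpy(best_compound_data.seg_mask, xd->seg_mask, 2 * MAX_SB_SQUARE);
  }
}
/* Restore the winner so prediction uses it, not the last trial's mask. */
memcpy(xd->seg_mask, best_compound_data.seg_mask, 2 * MAX_SB_SQUARE);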
@@ -8726,11 +8740,11 @@ static int64_t handle_inter_mode(
if (!is_interinter_compound_used(cur_type, bsize)) break;
tmp_rate_mv = rate_mv;
best_rd_cur = INT64_MAX;
mbmi->interinter_compound_data.type = cur_type;
mbmi->interinter_compound_type = cur_type;
rs2 = av1_cost_literal(get_interinter_compound_type_bits(
bsize, mbmi->interinter_compound_data.type)) +
bsize, mbmi->interinter_compound_type)) +
(masked_compound_used
? compound_type_cost[mbmi->interinter_compound_data.type]
? compound_type_cost[mbmi->interinter_compound_type]
: 0);
switch (cur_type) {
@@ -8770,8 +8784,17 @@ static int64_t handle_inter_mode(
if (best_rd_cur < best_rd_compound) {
best_rd_compound = best_rd_cur;
memcpy(&best_compound_data, &mbmi->interinter_compound_data,
sizeof(best_compound_data));
#if CONFIG_WEDGE
best_compound_data.wedge_index = mbmi->wedge_index;
best_compound_data.wedge_sign = mbmi->wedge_sign;
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
best_compound_data.mask_type = mbmi->mask_type;
memcpy(best_compound_data.seg_mask, xd->seg_mask,
2 * MAX_SB_SQUARE * sizeof(uint8_t));
#endif // CONFIG_COMPOUND_SEGMENT
best_compound_data.interinter_compound_type =
mbmi->interinter_compound_type;
if (have_newmv_in_inter_mode(this_mode)) {
if (use_masked_motion_search(cur_type)) {
best_tmp_rate_mv = tmp_rate_mv;
@@ -8787,14 +8810,23 @@ static int64_t handle_inter_mode(
mbmi->mv[0].as_int = cur_mv[0].as_int;
mbmi->mv[1].as_int = cur_mv[1].as_int;
}
memcpy(&mbmi->interinter_compound_data, &best_compound_data,
sizeof(INTERINTER_COMPOUND_DATA));
#if CONFIG_WEDGE
mbmi->wedge_index = best_compound_data.wedge_index;
mbmi->wedge_sign = best_compound_data.wedge_sign;
#endif // CONFIG_WEDGE
#if CONFIG_COMPOUND_SEGMENT
mbmi->mask_type = best_compound_data.mask_type;
memcpy(xd->seg_mask, best_compound_data.seg_mask,
2 * MAX_SB_SQUARE * sizeof(uint8_t));
#endif // CONFIG_COMPOUND_SEGMENT
mbmi->interinter_compound_type =
best_compound_data.interinter_compound_type;
if (have_newmv_in_inter_mode(this_mode)) {
mbmi->mv[0].as_int = best_mv[0].as_int;
mbmi->mv[1].as_int = best_mv[1].as_int;
xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
if (use_masked_motion_search(mbmi->interinter_compound_data.type)) {
if (use_masked_motion_search(mbmi->interinter_compound_type)) {
rd_stats->rate += best_tmp_rate_mv - rate_mv;
rate_mv = best_tmp_rate_mv;
}
@@ -8809,9 +8841,9 @@ static int64_t handle_inter_mode(
*args->compmode_interinter_cost =
av1_cost_literal(get_interinter_compound_type_bits(
bsize, mbmi->interinter_compound_data.type)) +
bsize, mbmi->interinter_compound_type)) +
(masked_compound_used
? compound_type_cost[mbmi->interinter_compound_data.type]
? compound_type_cost[mbmi->interinter_compound_type]
: 0);
}
@@ -11653,7 +11685,7 @@ void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
#endif // CONFIG_FILTER_INTRA
mbmi->motion_mode = SIMPLE_TRANSLATION;
#if CONFIG_EXT_INTER
mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
mbmi->use_wedge_interintra = 0;
#endif // CONFIG_EXT_INTER
#if CONFIG_WARPED_MOTION
......