Commit 42d9610a authored by Sarah Parker

Avoid sending bits for the compound type for sub 8x8 blocks

The only compound mode used with sub 8x8 blocks is COMPOUND_AVERAGE, so
we don't have to signal the compound type in this case.

Change-Id: I90d0162e5f7f1ad205e65094293cde2a48eb77b1
parent 0115691e
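
In effect, the compound type is coded only when at least one masked compound
type (wedge or segment) is available for the block size; otherwise encoder and
decoder both infer COMPOUND_AVERAGE. A minimal, self-contained sketch of that
signaling rule (the helper names and the 4-bit value below are hypothetical
stand-ins, not the codec's own):

  #include <assert.h>

  /* Hypothetical stand-ins for the codec's enums and wedge table. */
  enum { COMPOUND_AVERAGE, COMPOUND_WEDGE, COMPOUND_TYPES };

  /* Sub-8x8 sizes get 0 wedge bits, so no wedge mask is codeable there. */
  static int wedge_bits(int is_sub8x8) { return is_sub8x8 ? 0 : 4; }

  static int any_masked_compound_used(int is_sub8x8) {
    return wedge_bits(is_sub8x8) > 0;
  }

  int main(void) {
    /* Sub-8x8: type is inferred as COMPOUND_AVERAGE; zero bits are sent. */
    assert(!any_masked_compound_used(1));
    /* 8x8 and up: a compound-type symbol actually appears in the bitstream. */
    assert(any_masked_compound_used(0));
    return 0;
  }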
@@ -177,13 +177,30 @@ typedef struct {
 extern const wedge_params_type wedge_params_lookup[BLOCK_SIZES];
 
-static INLINE int get_wedge_bits_lookup(BLOCK_SIZE sb_type) {
-  return wedge_params_lookup[sb_type].bits;
+static INLINE int is_interinter_compound_used(COMPOUND_TYPE type,
+                                              BLOCK_SIZE sb_type) {
+  switch (type) {
+    case COMPOUND_AVERAGE: (void)sb_type; return 1;
+    case COMPOUND_WEDGE: return wedge_params_lookup[sb_type].bits > 0;
+#if CONFIG_COMPOUND_SEGMENT
+    case COMPOUND_SEG: return sb_type >= BLOCK_8X8;
+#endif  // CONFIG_COMPOUND_SEGMENT
+    default: assert(0); return 0;
+  }
 }
 
-static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
-  (void)sb_type;
-  return wedge_params_lookup[sb_type].bits > 0;
+static INLINE int is_any_masked_compound_used(BLOCK_SIZE sb_type) {
+  COMPOUND_TYPE comp_type;
+  for (comp_type = 0; comp_type < COMPOUND_TYPES; comp_type++) {
+    if (is_masked_compound_type(comp_type) &&
+        is_interinter_compound_used(comp_type, sb_type))
+      return 1;
+  }
+  return 0;
+}
+
+static INLINE int get_wedge_bits_lookup(BLOCK_SIZE sb_type) {
+  return wedge_params_lookup[sb_type].bits;
 }
 
 static INLINE int get_interinter_wedge_bits(BLOCK_SIZE sb_type) {
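A quick way to read the new predicates, as a hedged sketch (it assumes
"av1/common/reconinter.h" is included and that wedge_params_lookup assigns
zero wedge bits below BLOCK_8X8, which is what makes sub-8x8 blocks
average-only):

  assert(is_interinter_compound_used(COMPOUND_AVERAGE, BLOCK_4X4)); /* any size */
  assert(!is_interinter_compound_used(COMPOUND_WEDGE, BLOCK_4X4));  /* 0 wedge bits */
  assert(!is_any_masked_compound_used(BLOCK_4X4));  /* type symbol skipped */
  assert(is_any_masked_compound_used(BLOCK_8X8));   /* type symbol coded */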
@@ -1940,29 +1940,33 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
 
 #if CONFIG_EXT_INTER
   mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
   if (cm->reference_mode != SINGLE_REFERENCE &&
       is_inter_compound_mode(mbmi->mode)
 #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
       && mbmi->motion_mode == SIMPLE_TRANSLATION
 #endif  // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
       ) {
-    mbmi->interinter_compound_data.type = aom_read_tree(
-        r, av1_compound_type_tree, cm->fc->compound_type_prob[bsize], ACCT_STR);
+    if (is_any_masked_compound_used(bsize)) {
+      mbmi->interinter_compound_data.type =
+          aom_read_tree(r, av1_compound_type_tree,
+                        cm->fc->compound_type_prob[bsize], ACCT_STR);
+      if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
+        mbmi->interinter_compound_data.wedge_index =
+            aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
+        mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
+      }
+#if CONFIG_COMPOUND_SEGMENT
+      else if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
+        mbmi->interinter_compound_data.mask_type =
+            aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
+      }
+#endif  // CONFIG_COMPOUND_SEGMENT
+    } else {
+      mbmi->interinter_compound_data.type = COMPOUND_AVERAGE;
+    }
     if (xd->counts)
       xd->counts->compound_interinter[bsize]
                                      [mbmi->interinter_compound_data.type]++;
-    if (mbmi->interinter_compound_data.type == COMPOUND_WEDGE) {
-      mbmi->interinter_compound_data.wedge_index =
-          aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
-      mbmi->interinter_compound_data.wedge_sign = aom_read_bit(r, ACCT_STR);
-    }
-#if CONFIG_COMPOUND_SEGMENT
-    else if (mbmi->interinter_compound_data.type == COMPOUND_SEG) {
-      mbmi->interinter_compound_data.mask_type =
-          aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR);
-    }
-#endif  // CONFIG_COMPOUND_SEGMENT
   }
 #endif  // CONFIG_EXT_INTER
@@ -1714,7 +1714,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
 #if CONFIG_MOTION_VAR
       && mbmi->motion_mode == SIMPLE_TRANSLATION
 #endif  // CONFIG_MOTION_VAR
-      ) {
+      && is_any_masked_compound_used(bsize)) {
     av1_write_token(
         w, av1_compound_type_tree, cm->fc->compound_type_prob[bsize],
         &compound_type_encodings[mbmi->interinter_compound_data.type]);
@@ -7534,7 +7534,7 @@ static int64_t pick_interinter_wedge(const AV1_COMP *const cpi,
   int wedge_index = -1;
   int wedge_sign = 0;
 
-  assert(is_interinter_wedge_used(bsize));
+  assert(is_interinter_compound_used(COMPOUND_WEDGE, bsize));
 
   if (cpi->sf.fast_wedge_sign_estimate) {
     wedge_sign = estimate_wedge_sign(cpi, x, bsize, p0, bw, p1, bw);
@@ -8326,6 +8326,7 @@ static int64_t handle_inter_mode(
     uint8_t *preds1[1] = { pred1 };
     int strides[1] = { bw };
     int tmp_rate_mv;
+    int masked_compound_used = is_any_masked_compound_used(bsize);
     COMPOUND_TYPE cur_type;
 
     best_mv[0].as_int = cur_mv[0].as_int;
@@ -8334,7 +8335,7 @@
     av1_cost_tokens(compound_type_cost, cm->fc->compound_type_prob[bsize],
                     av1_compound_type_tree);
 
-    if (is_interinter_wedge_used(bsize)) {
+    if (masked_compound_used) {
       // get inter predictors to use for masked compound modes
       av1_build_inter_predictors_for_planes_single_buf(
           xd, bsize, 0, 0, mi_row, mi_col, 0, preds0, strides);
@@ -8343,12 +8344,15 @@
     }
 
     for (cur_type = COMPOUND_AVERAGE; cur_type < COMPOUND_TYPES; cur_type++) {
+      if (!is_interinter_compound_used(cur_type, bsize)) break;
       tmp_rate_mv = rate_mv;
      best_rd_cur = INT64_MAX;
       mbmi->interinter_compound_data.type = cur_type;
       rs2 = av1_cost_literal(get_interinter_compound_type_bits(
                 bsize, mbmi->interinter_compound_data.type)) +
-            compound_type_cost[mbmi->interinter_compound_data.type];
+            (masked_compound_used
+                 ? compound_type_cost[mbmi->interinter_compound_data.type]
+                 : 0);
 
       switch (cur_type) {
         case COMPOUND_AVERAGE:
@@ -8363,7 +8367,6 @@
           best_rd_compound = best_rd_cur;
           break;
         case COMPOUND_WEDGE:
-          if (!is_interinter_wedge_used(bsize)) break;
           if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
               best_rd_compound / 3 < ref_best_rd) {
             best_rd_cur = build_and_cost_compound_wedge(
@@ -8373,7 +8376,6 @@
           break;
 #if CONFIG_COMPOUND_SEGMENT
         case COMPOUND_SEG:
-          if (!is_interinter_wedge_used(bsize)) break;
           if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh &&
               best_rd_compound / 3 < ref_best_rd) {
             best_rd_cur = build_and_cost_compound_seg(
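
The rs2 change above can be read in isolation: the tree cost of the
compound-type token is charged only when that token is actually written to
the bitstream. A toy illustration with made-up costs (the numbers are
placeholders, not measured rates):

  #include <stdio.h>

  /* Mirrors the rs2 expression from the hunk above with invented numbers. */
  static int compound_rate(int literal_cost, int token_cost, int masked_used) {
    return literal_cost + (masked_used ? token_cost : 0);
  }

  int main(void) {
    printf("8x8 and up: %d\n", compound_rate(512, 96, 1)); /* 608: token coded */
    printf("sub-8x8:    %d\n", compound_rate(512, 96, 0)); /* 512: token skipped */
    return 0;
  }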