Commit 426a997e authored by David Barker, committed by Debargha Mukherjee

Fix ext-inter + compound-segment + supertx

Allow the above combination of experiments to work together
correctly, fixing an encode/decode mismatch that occurred when
all three were enabled.

After this change, build_masked_compound(_highbd) is only ever
called when CONFIG_SUPERTX is off, so these functions are now
wrapped in an '#if !CONFIG_SUPERTX' block.

BUG=aomedia:313

Change-Id: Ic3886bc69ba9624b8fcb0a4c2d71fc64d2c0f22c
parent d803cb96
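
The core of the fix is visible in the first hunk below: the SUPERTX-only blend
helpers previously received only wedge_index/wedge_sign and always blended with
the wedge master mask and MASK_MASTER_STRIDE. When compound-segment supplied the
mask instead, encoder and decoder could blend with different masks, hence the
mismatch. The patch passes the whole INTERINTER_COMPOUND_DATA and selects the
mask and its stride together. The self-contained C sketch below distills that
pattern; every type and function in it (compound_data, get_wedge_mask,
blend_with_compound_mask) is an invented stand-in for illustration, not a
libaom API.

/*
 * Minimal sketch of the mask-selection pattern this patch introduces.
 * Illustration only; the types and stubs below are simplified stand-ins,
 * not the real libaom definitions.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_MASTER_STRIDE 64 /* stand-in for the wedge master stride */

typedef enum { COMPOUND_AVERAGE, COMPOUND_WEDGE, COMPOUND_SEG } compound_type;

/* Simplified stand-in for INTERINTER_COMPOUND_DATA. */
typedef struct {
  compound_type type;
  int wedge_index;
  int wedge_sign;
  uint8_t seg_mask[64 * 64]; /* per-block segmentation mask */
} compound_data;

/* Stand-in wedge mask source. */
static uint8_t wedge_master[MASK_MASTER_STRIDE * MASK_MASTER_STRIDE];
static const uint8_t *get_wedge_mask(int index, int sign) {
  (void)index;
  (void)sign;
  return wedge_master;
}

/* The pattern: pick the mask AND its stride together, based on the
 * compound type, then hand both to the blend routine. Before the patch,
 * the SUPERTX path hard-coded the wedge mask and MASK_MASTER_STRIDE,
 * which was wrong whenever COMPOUND_SEG supplied the mask. */
static void blend_with_compound_mask(const compound_data *comp, int block_w) {
  const uint8_t *mask;
  size_t mask_stride;
  switch (comp->type) {
    case COMPOUND_WEDGE:
      mask = get_wedge_mask(comp->wedge_index, comp->wedge_sign);
      mask_stride = MASK_MASTER_STRIDE;
      break;
    case COMPOUND_SEG:
      mask = comp->seg_mask;
      mask_stride = (size_t)block_w; /* seg masks are packed per block */
      break;
    default:
      assert(0);
      return;
  }
  /* A real implementation would call aom_blend_a64_mask() here. */
  printf("blending with mask %p, stride %zu\n", (const void *)mask,
         mask_stride);
}

int main(void) {
  compound_data comp = { COMPOUND_SEG, 0, 0, { 0 } };
  blend_with_compound_mask(&comp, 32);
  return 0;
}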
@@ -528,33 +528,61 @@ void av1_init_wedge_masks() {
 #if CONFIG_SUPERTX
 static void build_masked_compound_wedge_extend(
     uint8_t *dst, int dst_stride, const uint8_t *src0, int src0_stride,
-    const uint8_t *src1, int src1_stride, int wedge_index, int wedge_sign,
-    BLOCK_SIZE sb_type, int wedge_offset_x, int wedge_offset_y, int h, int w) {
+    const uint8_t *src1, int src1_stride,
+    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type,
+    int wedge_offset_x, int wedge_offset_y, int h, int w) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                          wedge_offset_x, wedge_offset_y);
+  const uint8_t *mask;
+  size_t mask_stride;
+  switch (comp_data->type) {
+    case COMPOUND_WEDGE:
+      mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
+                               sb_type, wedge_offset_x, wedge_offset_y);
+      mask_stride = MASK_MASTER_STRIDE;
+      break;
+#if CONFIG_COMPOUND_SEGMENT
+    case COMPOUND_SEG:
+      mask = comp_data->seg_mask;
+      mask_stride = block_size_wide[sb_type];
+      break;
+#endif
+    default: assert(0); return;
+  }
   aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride,
-                     mask, MASK_MASTER_STRIDE, h, w, subh, subw);
+                     mask, mask_stride, h, w, subh, subw);
 }
 
 #if CONFIG_AOM_HIGHBITDEPTH
 static void build_masked_compound_wedge_extend_highbd(
     uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride,
-    const uint8_t *src1_8, int src1_stride, int wedge_index, int wedge_sign,
-    BLOCK_SIZE sb_type, int wedge_offset_x, int wedge_offset_y, int h, int w,
-    int bd) {
+    const uint8_t *src1_8, int src1_stride,
+    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type,
+    int wedge_offset_x, int wedge_offset_y, int h, int w, int bd) {
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = av1_get_soft_mask(wedge_index, wedge_sign, sb_type,
-                                          wedge_offset_x, wedge_offset_y);
+  const uint8_t *mask;
+  size_t mask_stride;
+  switch (comp_data->type) {
+    case COMPOUND_WEDGE:
+      mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign,
+                               sb_type, wedge_offset_x, wedge_offset_y);
+      mask_stride = MASK_MASTER_STRIDE;
+      break;
+#if CONFIG_COMPOUND_SEGMENT
+    case COMPOUND_SEG:
+      mask = comp_data->seg_mask;
+      mask_stride = block_size_wide[sb_type];
+      break;
+#endif
+    default: assert(0); return;
+  }
   aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8,
-                            src1_stride, mask, MASK_MASTER_STRIDE, h, w, subh,
-                            subw, bd);
+                            src1_stride, mask, mask_stride, h, w, subh, subw,
+                            bd);
 }
 #endif  // CONFIG_AOM_HIGHBITDEPTH
-#endif  // CONFIG_SUPERTX
-
+#else
 static void build_masked_compound(
     uint8_t *dst, int dst_stride, const uint8_t *src0, int src0_stride,
     const uint8_t *src1, int src1_stride,
@@ -587,6 +615,7 @@ static void build_masked_compound_highbd(
                             subh, subw, bd);
 }
 #endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_SUPERTX
 
 void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                                      uint8_t *dst, int dst_stride,
@@ -622,6 +651,7 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
   InterpFilter tmp_ipf = interp_filter;
 #endif  // CONFIG_DUAL_FILTER
   ConvolveParams conv_params = get_conv_params(0);
+
 #if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst_[2 * MAX_SB_SQUARE]);
   uint8_t *tmp_dst = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -633,18 +663,6 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                            is_global, p_col, p_row, plane, ref,
 #endif  // CONFIG_GLOBAL_MOTION
                            xs, ys, xd);
-#if CONFIG_SUPERTX
-  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-    build_masked_compound_wedge_extend_highbd(
-        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
-        comp_data->wedge_index, comp_data->wedge_sign, mi->mbmi.sb_type,
-        wedge_offset_x, wedge_offset_y, h, w, xd->bd);
-  else
-    build_masked_compound_wedge_extend(
-        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
-        comp_data->wedge_index, comp_data->wedge_sign, mi->mbmi.sb_type,
-        wedge_offset_x, wedge_offset_y, h, w);
-#else
 #if CONFIG_COMPOUND_SEGMENT
   if (!plane && comp_data->type == COMPOUND_SEG) {
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -657,6 +675,17 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                               mi->mbmi.sb_type, h, w);
   }
 #endif  // CONFIG_COMPOUND_SEGMENT
+
+#if CONFIG_SUPERTX
+  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+    build_masked_compound_wedge_extend_highbd(
+        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
+        mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
+  else
+    build_masked_compound_wedge_extend(
+        dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, comp_data,
+        mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w);
+#else
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
     build_masked_compound_highbd(dst, dst_stride, dst, dst_stride, tmp_dst,
                                  MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w,
@@ -665,7 +694,8 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
     build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst,
                           MAX_SB_SIZE, comp_data, mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
-#else  // CONFIG_AOM_HIGHBITDEPTH
+
+#else   // CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_dst[MAX_SB_SQUARE]);
   av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE, subpel_x,
                            subpel_y, sf, w, h, &conv_params, tmp_ipf,
@@ -673,18 +703,17 @@ void av1_make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                            is_global, p_col, p_row, plane, ref,
 #endif  // CONFIG_GLOBAL_MOTION
                            xs, ys, xd);
-#if CONFIG_SUPERTX
-  build_masked_compound_wedge_extend(dst, dst_stride, dst, dst_stride, tmp_dst,
-                                     MAX_SB_SIZE, comp_data->wedge_index,
-                                     comp_data->wedge_sign, mi->mbmi.sb_type,
-                                     wedge_offset_x, wedge_offset_y, h, w);
-#else
 #if CONFIG_COMPOUND_SEGMENT
   if (!plane && comp_data->type == COMPOUND_SEG)
     build_compound_seg_mask(comp_data->seg_mask, comp_data->mask_type, dst,
                             dst_stride, tmp_dst, MAX_SB_SIZE, mi->mbmi.sb_type,
                             h, w);
 #endif  // CONFIG_COMPOUND_SEGMENT
+#if CONFIG_SUPERTX
+  build_masked_compound_wedge_extend(dst, dst_stride, dst, dst_stride, tmp_dst,
+                                     MAX_SB_SIZE, comp_data, mi->mbmi.sb_type,
+                                     wedge_offset_x, wedge_offset_y, h, w);
+#else
   build_masked_compound(dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE,
                         comp_data, mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
@@ -2795,8 +2824,12 @@ void av1_build_inter_predictors_for_planes_single_buf(
 }
 
 static void build_wedge_inter_predictor_from_buf(
-    MACROBLOCKD *xd, int plane, int x, int y, int w, int h, uint8_t *ext_dst0,
-    int ext_dst_stride0, uint8_t *ext_dst1, int ext_dst_stride1) {
+    MACROBLOCKD *xd, int plane, int x, int y, int w, int h,
+#if CONFIG_SUPERTX
+    int wedge_offset_x, int wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+    uint8_t *ext_dst0, int ext_dst_stride0, uint8_t *ext_dst1,
+    int ext_dst_stride1) {
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int is_compound = has_second_ref(mbmi);
   MACROBLOCKD_PLANE *const pd = &xd->plane[plane];
@@ -2827,6 +2860,21 @@ static void build_wedge_inter_predictor_from_buf(
                                 ext_dst_stride1, mbmi->sb_type, h, w);
 #endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_COMPOUND_SEGMENT
+
+#if CONFIG_SUPERTX
+#if CONFIG_AOM_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
+      build_masked_compound_wedge_extend_highbd(
+          dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0,
+          CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, comp_data,
+          mbmi->sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd);
+    else
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      build_masked_compound_wedge_extend(
+          dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1,
+          ext_dst_stride1, comp_data, mbmi->sb_type, wedge_offset_x,
+          wedge_offset_y, h, w);
+#else
 #if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
       build_masked_compound_highbd(
@@ -2838,6 +2886,7 @@ static void build_wedge_inter_predictor_from_buf(
       build_masked_compound(dst, dst_buf->stride, ext_dst0, ext_dst_stride0,
                             ext_dst1, ext_dst_stride1, comp_data, mbmi->sb_type,
                             h, w);
+#endif  // CONFIG_SUPERTX
   } else {
 #if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
@@ -2851,12 +2900,13 @@
   }
 }
 
-void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
-                                              int plane_from, int plane_to,
-                                              uint8_t *ext_dst0[3],
-                                              int ext_dst_stride0[3],
-                                              uint8_t *ext_dst1[3],
-                                              int ext_dst_stride1[3]) {
+void av1_build_wedge_inter_predictor_from_buf(
+    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
+#if CONFIG_SUPERTX
+    int wedge_offset_x, int wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
+    int ext_dst_stride1[3]) {
   int plane;
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize =
@@ -2870,14 +2920,22 @@ void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
           build_wedge_inter_predictor_from_buf(
-              xd, plane, 4 * x, 4 * y, 4, 4, ext_dst0[plane],
-              ext_dst_stride0[plane], ext_dst1[plane], ext_dst_stride1[plane]);
+              xd, plane, 4 * x, 4 * y, 4, 4,
+#if CONFIG_SUPERTX
+              wedge_offset_x, wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+              ext_dst0[plane], ext_dst_stride0[plane], ext_dst1[plane],
+              ext_dst_stride1[plane]);
     } else {
       const int bw = block_size_wide[plane_bsize];
       const int bh = block_size_high[plane_bsize];
       build_wedge_inter_predictor_from_buf(
-          xd, plane, 0, 0, bw, bh, ext_dst0[plane], ext_dst_stride0[plane],
-          ext_dst1[plane], ext_dst_stride1[plane]);
+          xd, plane, 0, 0, bw, bh,
+#if CONFIG_SUPERTX
+          wedge_offset_x, wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+          ext_dst0[plane], ext_dst_stride0[plane], ext_dst1[plane],
+          ext_dst_stride1[plane]);
     }
   }
 }
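The remaining hunks thread the SUPERTX wedge offsets through
build_wedge_inter_predictor_from_buf, its public wrapper, the header
declaration, and the encoder's rate-distortion call sites, which pass zero
offsets. A minimal sketch of this feature-gated parameter-threading pattern,
using invented names rather than the real libaom signatures:

/* Sketch of the conditional parameter threading used above: extra
 * arguments exist only when a compile-time feature is on, and every
 * caller passes them under the same #if. Names are illustrative. */
#include <stdio.h>

#define CONFIG_SUPERTX 1 /* toggle to 0: the call sites still compile */

static void build_predictor(int plane, int w, int h,
#if CONFIG_SUPERTX
                            int wedge_offset_x, int wedge_offset_y,
#endif
                            const unsigned char *src) {
#if CONFIG_SUPERTX
  printf("plane %d, %dx%d, offsets (%d, %d)\n", plane, w, h, wedge_offset_x,
         wedge_offset_y);
#else
  printf("plane %d, %dx%d\n", plane, w, h);
#endif
  (void)src;
}

int main(void) {
  static const unsigned char buf[16] = { 0 };
  build_predictor(0, 4, 4,
#if CONFIG_SUPERTX
                  0, 0, /* the encoder's RD search passes zero offsets */
#endif
                  buf);
  return 0;
}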
@@ -593,12 +593,13 @@ void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
 void av1_build_inter_predictors_for_planes_single_buf(
     MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row,
     int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]);
-void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
-                                              int plane_from, int plane_to,
-                                              uint8_t *ext_dst0[3],
-                                              int ext_dst_stride0[3],
-                                              uint8_t *ext_dst1[3],
-                                              int ext_dst_stride1[3]);
+void av1_build_wedge_inter_predictor_from_buf(
+    MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to,
+#if CONFIG_SUPERTX
+    int wedge_offset_x, int wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+    uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3],
+    int ext_dst_stride1[3]);
 #endif  // CONFIG_EXT_INTER
 
 #ifdef __cplusplus
@@ -7726,8 +7726,12 @@ static int64_t build_and_cost_compound_seg(
     mbmi->mv[0].as_int = cur_mv[0].as_int;
     mbmi->mv[1].as_int = cur_mv[1].as_int;
     *out_rate_mv = rate_mv;
-    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides,
-                                             preds1, strides);
+    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0,
+#if CONFIG_SUPERTX
+                                             0, 0,
+#endif  // CONFIG_SUPERTX
+                                             preds0, strides, preds1,
+                                             strides);
   }
   av1_subtract_plane(x, bsize, 0);
   rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
@@ -7737,8 +7741,11 @@
 
     best_rd_cur = rd;
   } else {
-    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides,
-                                             preds1, strides);
+    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0,
+#if CONFIG_SUPERTX
+                                             0, 0,
+#endif  // CONFIG_SUPERTX
+                                             preds0, strides, preds1, strides);
     av1_subtract_plane(x, bsize, 0);
     rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                              &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);
@@ -7779,8 +7786,12 @@ static int64_t build_and_cost_compound_wedge(
     mbmi->mv[0].as_int = cur_mv[0].as_int;
     mbmi->mv[1].as_int = cur_mv[1].as_int;
     *out_rate_mv = rate_mv;
-    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides,
-                                             preds1, strides);
+    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0,
+#if CONFIG_SUPERTX
+                                             0, 0,
+#endif  // CONFIG_SUPERTX
+                                             preds0, strides, preds1,
+                                             strides);
   }
   av1_subtract_plane(x, bsize, 0);
   rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
@@ -7790,8 +7801,11 @@
 
     best_rd_cur = rd;
   } else {
-    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides,
-                                             preds1, strides);
+    av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0,
+#if CONFIG_SUPERTX
+                                             0, 0,
+#endif  // CONFIG_SUPERTX
+                                             preds0, strides, preds1, strides);
     av1_subtract_plane(x, bsize, 0);
     rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum,
                              &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX);