Commit 902ee506 authored by Debargha Mukherjee

A crash fix for supertx / ext-inter combination.

Change-Id: I9860376c98aa3b25f5bf86ed13d4a7631fa6b153
parent d2ca083c
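The diff below splits the wedge-mask accessors in two: vp10_get_contiguous_soft_mask returns the per-block, contiguously stored mask straight from wedge_params_lookup, while vp10_get_soft_mask now hands back a pointer into the larger master mask (via get_wedge_mask_inplace), so blends that consume it pass MASK_MASTER_STRIDE instead of the block width 4 * num_4x4_blocks_wide_lookup[sb_type]. The standalone sketch that follows is not libvpx code; MASTER_SIZE, MASTER_STRIDE, BLOCK_W/BLOCK_H, get_mask_in_master() and sum_mask() are made-up stand-ins. It only illustrates why a pointer that aliases a larger master buffer has to be walked with the master's stride: using the block width instead picks up weights from the wrong rows, which is the kind of mismatch this commit removes.

/* Illustrative sketch only -- not libvpx code.  All names here are
 * hypothetical; they model the stride mismatch the commit fixes. */
#include <stdint.h>
#include <stdio.h>

#define MASTER_SIZE 8                 /* hypothetical master-mask dimension */
#define MASTER_STRIDE MASTER_SIZE     /* stride of the master mask */
#define BLOCK_W 4                     /* hypothetical block width */
#define BLOCK_H 4                     /* hypothetical block height */

static uint8_t master_mask[MASTER_SIZE * MASTER_SIZE];

/* Analogous in spirit to vp10_get_soft_mask(): returns a pointer INTO the
 * master mask, so callers must step through it with MASTER_STRIDE, not
 * BLOCK_W. */
static const uint8_t *get_mask_in_master(int offset_x, int offset_y) {
  return master_mask + offset_y * MASTER_STRIDE + offset_x;
}

/* Sums a w x h window of mask values, stepping by the given stride. */
static int sum_mask(const uint8_t *mask, int stride, int w, int h) {
  int r, c, sum = 0;
  for (r = 0; r < h; ++r)
    for (c = 0; c < w; ++c)
      sum += mask[r * stride + c];
  return sum;
}

int main(void) {
  int i;
  const uint8_t *mask;
  for (i = 0; i < MASTER_SIZE * MASTER_SIZE; ++i)
    master_mask[i] = (uint8_t)i;      /* arbitrary but distinct weights */

  mask = get_mask_in_master(2, 2);

  /* Correct: the aliased pointer is walked with the master's stride. */
  printf("master stride:      %d\n",
         sum_mask(mask, MASTER_STRIDE, BLOCK_W, BLOCK_H));

  /* Wrong: walking it with the block width pulls in weights that belong to
   * neighbouring rows of the master mask -- the mismatch that the switch to
   * MASK_MASTER_STRIDE in the blend calls below avoids. */
  printf("block-width stride: %d\n",
         sum_mask(mask, BLOCK_W, BLOCK_W, BLOCK_H));
  return 0;
}

Compiled and run, the two printed sums differ even though both loops start from the same pointer; only the stride changes.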
@@ -333,22 +333,15 @@ static const uint8_t *get_wedge_mask_inplace(int wedge_index,
   return master;
 }
 
-static const uint8_t *get_wedge_mask(int wedge_index,
-                                     int neg,
-                                     BLOCK_SIZE bsize) {
-  return wedge_params_lookup[bsize].masks[neg][wedge_index];
-}
-
 const uint8_t *vp10_get_soft_mask(int wedge_index,
                                   int wedge_sign,
                                   BLOCK_SIZE sb_type,
                                   int offset_x,
                                   int offset_y) {
-  const int bw = 4 * num_4x4_blocks_wide_lookup[sb_type];
   const uint8_t *mask =
-      get_wedge_mask(wedge_index, wedge_sign, sb_type);
+      get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
   if (mask)
-    mask -= (offset_x + offset_y * bw);
+    mask -= (offset_x + offset_y * MASK_MASTER_STRIDE);
   return mask;
 }
@@ -469,7 +462,7 @@ static void build_masked_compound_wedge_extend(
   vpx_blend_mask6(dst, dst_stride,
                   src0, src0_stride,
                   src1, src1_stride,
-                  mask, 4 * num_4x4_blocks_wide_lookup[sb_type],
+                  mask, MASK_MASTER_STRIDE,
                   h, w, subh, subw);
 }
@@ -489,7 +482,7 @@ static void build_masked_compound_wedge_extend_highbd(
   vpx_highbd_blend_mask6(dst_8, dst_stride,
                          src0_8, src0_stride,
                          src1_8, src1_stride,
-                         mask, 4 * num_4x4_blocks_wide_lookup[sb_type],
+                         mask, MASK_MASTER_STRIDE,
                          h, w, subh, subw, bd);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -506,8 +499,8 @@ static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
   // pass in subsampling factors directly.
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                           sb_type, 0, 0);
+  const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign,
+                                                      sb_type);
   vpx_blend_mask6(dst, dst_stride,
                   src0, src0_stride,
                   src1, src1_stride,
@@ -526,8 +519,8 @@ static void build_masked_compound_wedge_highbd(uint8_t *dst_8, int dst_stride,
   // pass in subsampling factors directly.
   const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
   const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
-  const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                           sb_type, 0, 0);
+  const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign,
+                                                      sb_type);
   vpx_highbd_blend_mask6(dst_8, dst_stride,
                          src0_8, src0_stride,
                          src1_8, src1_stride,
@@ -1887,8 +1880,9 @@ static void combine_interintra(INTERINTRA_MODE mode,
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
-      const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                               bsize, 0, 0);
+      const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index,
+                                                          wedge_sign,
+                                                          bsize);
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
       vpx_blend_mask6(comppred, compstride,
@@ -2026,8 +2020,9 @@ static void combine_interintra_highbd(INTERINTRA_MODE mode,
   if (use_wedge_interintra) {
     if (is_interintra_wedge_used(bsize)) {
-      const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                               bsize, 0, 0);
+      const uint8_t *mask = vp10_get_contiguous_soft_mask(wedge_index,
+                                                          wedge_sign,
+                                                          bsize);
       const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
       const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
       vpx_highbd_blend_mask6(comppred8, compstride,
@@ -589,6 +589,12 @@ void vp10_build_prediction_by_left_preds(VP10_COMMON *cm,
 
 void vp10_init_wedge_masks();
 
+static INLINE const uint8_t *vp10_get_contiguous_soft_mask(int wedge_index,
+                                                           int wedge_sign,
+                                                           BLOCK_SIZE sb_type) {
+  return wedge_params_lookup[sb_type].masks[wedge_sign][wedge_index];
+}
+
 const uint8_t *vp10_get_soft_mask(int wedge_index,
                                   int wedge_sign,
                                   BLOCK_SIZE sb_type,
@@ -6512,7 +6512,7 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
   BLOCK_SIZE sb_type = mbmi->sb_type;
   const uint8_t *mask;
   const int mask_stride = 4 * num_4x4_blocks_wide_lookup[bsize];
-  mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, 0, 0);
+  mask = vp10_get_contiguous_soft_mask(wedge_index, wedge_sign, sb_type);
 
   if (which == 0 || which == 2)
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
@@ -6521,7 +6521,7 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
 
   if (which == 1 || which == 2) {
     // get the negative mask
-    mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, 0, 0);
+    mask = vp10_get_contiguous_soft_mask(wedge_index, !wedge_sign, sb_type);
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
                             mi_row, mi_col, &tmp_mv[1], &rate_mv[1],
                             1, mv_idx[1]);
@@ -7577,8 +7577,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
       // Refine motion vector.
       if (have_newmv_in_inter_mode(this_mode) && best_wedge_index > -1) {
         // get negative of mask
-        const uint8_t* mask = vp10_get_soft_mask(
-            best_wedge_index, 1, bsize, 0, 0);
+        const uint8_t* mask = vp10_get_contiguous_soft_mask(
+            best_wedge_index, 1, bsize);
         mbmi->interintra_wedge_index = best_wedge_index;
         mbmi->interintra_wedge_sign = 0;
         do_masked_motion_search(cpi, x, mask, bw, bsize,