Commit 37f6fe61 authored by Debargha Mukherjee

Add config flags and macros to control ext-inter

Adds a dependent config flag 'interintra' to turn interintra
modes on/off altogether.
Adds a dependent config flag 'wedge' to turn wedge compound
prediction on/off for both interinter and interintra. Both flags
depend on ext-inter and take effect only when it is enabled.

Also adds a macro (USE_SOFT_WEIGHTS_IN_WEDGE) that, when turned
off, changes wedge predictors to use only 0, 1/2, or 1 weights.

From now on, use
--enable-ext-inter --enable-wedge --enable-interintra to get the
same behavior as the old --enable-ext-inter alone.
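
For example, a tree previously configured with

  ./configure --enable-ext-inter

would now be configured with all three flags (the command line is
assembled from the flags named above; the configure invocation
itself is assumed):

  ./configure --enable-ext-inter --enable-wedge --enable-interintra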

Change-Id: I2e787e6994163b6b859a9d6431b87c4217834ddc
parent 7e08ac3f
@@ -41,11 +41,8 @@ extern "C" {
#define MAX_MB_PLANE 3
#if CONFIG_EXT_INTER
// Should we try rectangular interintra predictions?
#define USE_RECT_INTERINTRA 1
#if CONFIG_COMPOUND_SEGMENT
// Set COMPOUND_SEGMENT_TYPE to one of the three
// 0: Uniform
// 1: Difference weighted
@@ -1026,19 +1023,31 @@ void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
#if CONFIG_EXT_INTER
static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
#if !USE_RECT_INTERINTRA
if (block_size_wide[bsize] != block_size_high[bsize]) return 0;
#endif
#if CONFIG_INTERINTRA
// TODO(debargha): Should this be bsize < BLOCK_LARGEST?
return (bsize >= BLOCK_8X8) && (bsize < BLOCK_64X64);
#else
(void)bsize;
return 0;
#endif // CONFIG_INTERINTRA
}
static INLINE int is_interintra_allowed_mode(const PREDICTION_MODE mode) {
#if CONFIG_INTERINTRA
return (mode >= NEARESTMV) && (mode <= NEWMV);
#else
(void)mode;
return 0;
#endif // CONFIG_INTERINTRA
}
static INLINE int is_interintra_allowed_ref(const MV_REFERENCE_FRAME rf[2]) {
#if CONFIG_INTERINTRA
return (rf[0] > INTRA_FRAME) && (rf[1] <= INTRA_FRAME);
#else
(void)rf;
return 0;
#endif // CONFIG_INTERINTRA
}
static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
......
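
The body of is_interintra_allowed() is elided above; presumably it
just ANDs the three predicates, along these lines (a sketch, assuming
the usual MB_MODE_INFO fields sb_type, mode, and ref_frame):

static INLINE int is_interintra_allowed(const MB_MODE_INFO *mbmi) {
  // Interintra needs an allowed block size, an allowed single-ref
  // prediction mode, and an inter first reference paired with an
  // intra second "reference". With CONFIG_INTERINTRA off, each
  // predicate above returns 0, so this returns 0 as well.
  return is_interintra_allowed_bsize(mbmi->sb_type) &&
         is_interintra_allowed_mode(mbmi->mode) &&
         is_interintra_allowed_ref(mbmi->ref_frame);
}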
@@ -28,13 +28,21 @@
#if CONFIG_EXT_INTER
#define NSMOOTHERS 1
#define USE_SOFT_WEIGHTS_IN_WEDGE 1
static int get_masked_weight(int m, int smoothness) {
#define SMOOTHER_LEN 32
static const uint8_t smoothfn[NSMOOTHERS][2 * SMOOTHER_LEN + 1] = { {
#if USE_SOFT_WEIGHTS_IN_WEDGE
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 7, 13, 21, 32, 43,
51, 57, 60, 62, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
#else
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
#endif // USE_SOFT_WEIGHTS_IN_WEDGE
} };
if (m < -SMOOTHER_LEN)
return 0;
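
The rest of get_masked_weight() is cut off here; presumably it mirrors
the lower clamp with an upper one and otherwise indexes the table,
roughly as follows (a sketch in the table's 0..64 weight units):

  if (m < -SMOOTHER_LEN)
    return 0;   // zero weight
  else if (m > SMOOTHER_LEN)
    return 64;  // full weight, the table's maximum
  else
    return smoothfn[smoothness][m + SMOOTHER_LEN];  // smoothed ramp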
@@ -60,7 +68,8 @@ DECLARE_ALIGNED(16, static uint8_t,
static wedge_masks_type wedge_masks[BLOCK_SIZES][2];
// Some unused wedge codebooks left temporarily to facilitate experiments.
// To be removed when setteld.
// To be removed when settled.
/*
static wedge_code_type wedge_codebook_8_hgtw[8] = {
{ WEDGE_OBLIQUE27, 4, 4 }, { WEDGE_OBLIQUE63, 4, 4 },
{ WEDGE_OBLIQUE117, 4, 4 }, { WEDGE_OBLIQUE153, 4, 4 },
@@ -81,6 +90,7 @@ static wedge_code_type wedge_codebook_8_heqw[8] = {
{ WEDGE_HORIZONTAL, 4, 2 }, { WEDGE_HORIZONTAL, 4, 6 },
{ WEDGE_VERTICAL, 2, 4 }, { WEDGE_VERTICAL, 6, 4 },
};
*/
#if !USE_LARGE_WEDGE_CODEBOOK
static const wedge_code_type wedge_codebook_16_hgtw[16] = {
@@ -125,6 +135,7 @@ const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
#if CONFIG_WEDGE
{ 4, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_8X8], 0,
wedge_masks[BLOCK_8X8] },
{ 4, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_8X16], 0,
@@ -139,12 +150,34 @@ const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
wedge_masks[BLOCK_32X16] },
{ 4, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_32X32], 0,
wedge_masks[BLOCK_32X32] },
{ 0, wedge_codebook_8_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
{ 0, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
wedge_masks[BLOCK_32X64] },
{ 0, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
wedge_masks[BLOCK_64X32] },
{ 0, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
wedge_masks[BLOCK_64X64] },
#else
{ 0, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_8X8], 0,
wedge_masks[BLOCK_8X8] },
{ 0, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_8X16], 0,
wedge_masks[BLOCK_8X16] },
{ 0, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_16X8], 0,
wedge_masks[BLOCK_16X8] },
{ 0, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_16X16], 0,
wedge_masks[BLOCK_16X16] },
{ 0, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_16X32], 0,
wedge_masks[BLOCK_16X32] },
{ 0, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_32X16], 0,
wedge_masks[BLOCK_32X16] },
{ 0, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_32X32], 0,
wedge_masks[BLOCK_32X32] },
{ 0, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
wedge_masks[BLOCK_32X64] },
{ 0, wedge_codebook_8_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
{ 0, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
wedge_masks[BLOCK_64X32] },
{ 0, wedge_codebook_8_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
{ 0, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
wedge_masks[BLOCK_64X64] },
#endif // CONFIG_WEDGE
#if CONFIG_EXT_PARTITION
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
@@ -220,6 +253,7 @@ const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
#if CONFIG_WEDGE
{ 5, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_8X8], 0,
wedge_masks[BLOCK_8X8] },
{ 5, wedge_codebook_32_hgtw, wedge_signflip_lookup[BLOCK_8X16], 0,
@@ -234,12 +268,34 @@ const wedge_params_type wedge_params_lookup[BLOCK_SIZES] = {
wedge_masks[BLOCK_32X16] },
{ 5, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_32X32], 0,
wedge_masks[BLOCK_32X32] },
{ 0, wedge_codebook_8_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
{ 0, wedge_codebook_32_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
wedge_masks[BLOCK_32X64] },
{ 0, wedge_codebook_32_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
wedge_masks[BLOCK_64X32] },
{ 0, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
wedge_masks[BLOCK_64X64] },
#else
{ 0, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_8X8], 0,
wedge_masks[BLOCK_8X8] },
{ 0, wedge_codebook_32_hgtw, wedge_signflip_lookup[BLOCK_8X16], 0,
wedge_masks[BLOCK_8X16] },
{ 0, wedge_codebook_32_hltw, wedge_signflip_lookup[BLOCK_16X8], 0,
wedge_masks[BLOCK_16X8] },
{ 0, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_16X16], 0,
wedge_masks[BLOCK_16X16] },
{ 0, wedge_codebook_32_hgtw, wedge_signflip_lookup[BLOCK_16X32], 0,
wedge_masks[BLOCK_16X32] },
{ 0, wedge_codebook_32_hltw, wedge_signflip_lookup[BLOCK_32X16], 0,
wedge_masks[BLOCK_32X16] },
{ 0, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_32X32], 0,
wedge_masks[BLOCK_32X32] },
{ 0, wedge_codebook_32_hgtw, wedge_signflip_lookup[BLOCK_32X64], 0,
wedge_masks[BLOCK_32X64] },
{ 0, wedge_codebook_8_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
{ 0, wedge_codebook_32_hltw, wedge_signflip_lookup[BLOCK_64X32], 0,
wedge_masks[BLOCK_64X32] },
{ 0, wedge_codebook_8_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
{ 0, wedge_codebook_32_heqw, wedge_signflip_lookup[BLOCK_64X64], 0,
wedge_masks[BLOCK_64X64] },
#endif // CONFIG_WEDGE
#if CONFIG_EXT_PARTITION
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
......
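
In these tables the first field is the number of wedge-index bits for
the block size, so forcing it to 0 under !CONFIG_WEDGE disables wedge
compound while keeping the table layout intact. Callers can then gate
on it along these lines (a sketch; the helper name is illustrative):

static INLINE int is_interinter_wedge_used(BLOCK_SIZE sb_type) {
  // Wedge is usable only where the lookup assigns nonzero index bits.
  return wedge_params_lookup[sb_type].bits > 0;
}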
@@ -2262,8 +2262,7 @@ void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
predict_square_intra_block(xd, wpx, hpx, tx_size, mode, ref, ref_stride,
dst, dst_stride, col_off, row_off, plane);
} else {
#if (CONFIG_RECT_TX && (CONFIG_VAR_TX || CONFIG_EXT_TX)) || \
(CONFIG_EXT_INTER && USE_RECT_INTERINTRA)
#if (CONFIG_RECT_TX && (CONFIG_VAR_TX || CONFIG_EXT_TX)) || (CONFIG_EXT_INTER)
#if CONFIG_AOM_HIGHBITDEPTH
uint16_t tmp16[MAX_SB_SIZE];
#endif
@@ -2385,7 +2384,7 @@ void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
#else
assert(0);
#endif // (CONFIG_RECT_TX && (CONFIG_VAR_TX || CONFIG_EXT_TX)) ||
// (CONFIG_EXT_INTER && USE_RECT_INTERINTRA)
// (CONFIG_EXT_INTER)
}
}
......
@@ -9933,17 +9933,7 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data,
#if CONFIG_EXT_INTER
if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME) {
// Mode must by compatible
assert(is_interintra_allowed_mode(this_mode));
#if !USE_RECT_INTERINTRA
// Note: If the subsampling is unequal, any block size we pick will
// result in either a rectangular luma block or a rectangular chroma
// block. So in this case, we can't use any interintra modes.
if (xd->plane[1].subsampling_x != xd->plane[1].subsampling_y ||
xd->plane[2].subsampling_x != xd->plane[2].subsampling_y)
continue;
#endif // !USE_RECT_INTERINTRA
if (!is_interintra_allowed_mode(this_mode)) continue;
if (!is_interintra_allowed_bsize(bsize)) continue;
}
......
@@ -260,6 +260,8 @@ EXPERIMENT_LIST="
intra_interp
filter_intra
ext_inter
interintra
wedge
compound_segment
ext_refs
global_motion
......
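
Since interintra and wedge are dependent experiments, configure
presumably soft-enables them only when ext_inter itself is on,
roughly like this (a sketch of the usual pattern in this build
system, not the verbatim change):

post_process_cmdline() {
  if enabled ext_inter; then
    # interintra and wedge have no effect without ext_inter
    soft_enable interintra
    soft_enable wedge
  fi
}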