Commit f34deab2 authored by Debargha Mukherjee's avatar Debargha Mukherjee
Browse files

Adds compound wedge prediction modes

Incorporates wedge compound prediction modes.

Change-Id: Ie73b54b629105b9dcc5f3763be87f35b09ad2ec7
parent cf9c95c3
......@@ -45,6 +45,7 @@ typedef enum {
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
#define MAXTXLEN 32
#define CU_SIZE 64
static INLINE int is_inter_mode(PREDICTION_MODE mode) {
#if CONFIG_EXT_INTER
......@@ -55,6 +56,23 @@ static INLINE int is_inter_mode(PREDICTION_MODE mode) {
}
#if CONFIG_EXT_INTER
// Number of bits used to code the wedge index, by block-size class
// (see get_wedge_bits() below for the size -> class mapping).
#define WEDGE_BITS_SML 3
#define WEDGE_BITS_MED 4
#define WEDGE_BITS_BIG 5
// Sentinel wedge index meaning "no wedge is used".
#define WEDGE_NONE -1
// Wedge mask weights are fixed point with this many fractional bits; a
// weight and its complement sum to (1 << WEDGE_WEIGHT_BITS).
#define WEDGE_WEIGHT_BITS 6
// Returns the number of bits used to code a wedge index for the given block
// size, or 0 when wedge prediction is not available (blocks below 8x8).
static INLINE int get_wedge_bits(BLOCK_SIZE sb_type) {
  if (sb_type < BLOCK_8X8) return 0;
  if (sb_type == BLOCK_8X8) return WEDGE_BITS_SML;
  if (sb_type <= BLOCK_32X32) return WEDGE_BITS_MED;
  return WEDGE_BITS_BIG;
}
// Returns 1 if |mode| lies in the single-reference inter mode range
// (NEARESTMV..NEWFROMNEARMV), as opposed to the compound two-reference modes.
static INLINE int is_inter_singleref_mode(PREDICTION_MODE mode) {
  return mode >= NEARESTMV && mode <= NEWFROMNEARMV;
}
......@@ -69,6 +87,11 @@ static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
mode == NEAREST_NEWMV || mode == NEW_NEARESTMV ||
mode == NEAR_NEWMV || mode == NEW_NEARMV);
}
#else
// Without CONFIG_EXT_INTER, NEWMV is the only inter mode that codes a new
// motion vector.
static INLINE int have_newmv_in_inter_mode(PREDICTION_MODE mode) {
  return (mode == NEWMV);
}
#endif // CONFIG_EXT_INTER
/* For keyframes, intra block modes are predicted by the (already decoded)
......@@ -172,6 +195,12 @@ typedef struct {
#if CONFIG_EXT_INTER
PREDICTION_MODE interintra_mode;
PREDICTION_MODE interintra_uv_mode;
// TODO(debargha): Consolidate these flags
int use_wedge_interintra;
int interintra_wedge_index;
int interintra_uv_wedge_index;
int use_wedge_interinter;
int interinter_wedge_index;
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
......@@ -203,12 +232,6 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[1] > INTRA_FRAME;
}
#if CONFIG_OBMC
// OBMC is only allowed for blocks of size 8x8 and above.
static INLINE int is_obmc_allowed(const MB_MODE_INFO *mbmi) {
  return (mbmi->sb_type >= BLOCK_8X8);
}
#endif // CONFIG_OBMC
PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b);
......@@ -647,6 +670,23 @@ static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
}
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
// OBMC is only allowed for blocks of size 8x8 and above.
static INLINE int is_obmc_allowed(const MB_MODE_INFO *mbmi) {
  return (mbmi->sb_type >= BLOCK_8X8);
}
// Returns 1 if the neighboring block |mbmi| may contribute an overlapped
// (OBMC) prediction. The neighbor must be inter coded; with CONFIG_EXT_INTER
// it must additionally not use a wedge compound prediction (compound ref +
// wedge available + use_wedge_interinter set) nor an inter-intra prediction.
static INLINE int is_neighbor_overlappable(const MB_MODE_INFO *mbmi) {
#if CONFIG_EXT_INTER
  return (is_inter_block(mbmi) &&
          !(has_second_ref(mbmi) && get_wedge_bits(mbmi->sb_type) &&
            mbmi->use_wedge_interinter) &&
          !(is_interintra_pred(mbmi)));
#else
  return (is_inter_block(mbmi));
#endif  // CONFIG_EXT_INTER
}
#endif // CONFIG_OBMC
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -10,6 +10,7 @@
#include "vpx_mem/vpx_mem.h"
#include "vp10/common/reconinter.h"
#include "vp10/common/onyxc_int.h"
#include "vp10/common/seg_common.h"
......@@ -190,8 +191,8 @@ static const vpx_prob default_drl_prob[DRL_MODE_CONTEXTS] = {
#if CONFIG_EXT_INTER
static const vpx_prob default_new2mv_prob = 180;
#endif
#endif
#endif // CONFIG_EXT_INTER
#endif // CONFIG_REF_MV
static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS]
[INTER_MODES - 1] = {
......@@ -230,6 +231,14 @@ static const vpx_prob default_inter_compound_mode_probs
// Default per-block-size probabilities for the binary inter-intra flag.
static const vpx_prob default_interintra_prob[BLOCK_SIZES] = {
  192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
};
// Default per-block-size probabilities for the "inter-intra uses a wedge"
// flag.
// NOTE(review): only 13 entries are listed; if BLOCK_SIZES is 16 (with
// CONFIG_EXT_PARTITION) the trailing entries are zero-initialized — confirm
// that is intended.
static const vpx_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
  192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
};
// Default per-block-size probabilities for the "compound inter-inter uses a
// wedge" flag.
static const vpx_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
  192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
};
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
......@@ -1337,6 +1346,8 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
#if CONFIG_EXT_INTER
vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
vp10_copy(fc->interintra_prob, default_interintra_prob);
vp10_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
vp10_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
#endif // CONFIG_EXT_INTER
#if CONFIG_SUPERTX
vp10_copy(fc->supertx_prob, default_supertx_prob);
......@@ -1445,12 +1456,21 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
pre_fc->inter_compound_mode_probs[i],
counts->inter_compound_mode[i],
fc->inter_compound_mode_probs[i]);
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interintra_allowed_bsize(i))
fc->interintra_prob[i] = mode_mv_merge_probs(pre_fc->interintra_prob[i],
counts->interintra[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interintra_allowed_bsize(i) && get_wedge_bits(i))
fc->wedge_interintra_prob[i] = mode_mv_merge_probs(
pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
}
for (i = 0; i < BLOCK_SIZES; ++i) {
if (get_wedge_bits(i))
fc->wedge_interinter_prob[i] = mode_mv_merge_probs(
pre_fc->wedge_interinter_prob[i], counts->wedge_interinter[i]);
}
#endif // CONFIG_EXT_INTER
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
......
......@@ -74,13 +74,15 @@ typedef struct frame_contexts {
#if CONFIG_EXT_INTER
vpx_prob new2mv_prob;
#endif // CONFIG_EXT_INTER
#endif
#endif // CONFIG_REF_MV
vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1];
#if CONFIG_EXT_INTER
vpx_prob inter_compound_mode_probs[INTER_MODE_CONTEXTS]
[INTER_COMPOUND_MODES - 1];
vpx_prob interintra_prob[BLOCK_SIZES];
vpx_prob wedge_interintra_prob[BLOCK_SIZES];
vpx_prob wedge_interinter_prob[BLOCK_SIZES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
vpx_prob obmc_prob[BLOCK_SIZES];
......@@ -143,6 +145,8 @@ typedef struct FRAME_COUNTS {
#if CONFIG_EXT_INTER
unsigned int inter_compound_mode[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
unsigned int interintra[BLOCK_SIZES][2];
unsigned int wedge_interintra[BLOCK_SIZES][2];
unsigned int wedge_interinter[BLOCK_SIZES][2];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
unsigned int obmc[BLOCK_SIZES][2];
......
......@@ -41,21 +41,32 @@ typedef enum BITSTREAM_PROFILE {
MAX_PROFILES
} BITSTREAM_PROFILE;
#define BLOCK_4X4 0
#define BLOCK_4X8 1
#define BLOCK_8X4 2
#define BLOCK_8X8 3
#define BLOCK_8X16 4
#define BLOCK_16X8 5
#define BLOCK_16X16 6
#define BLOCK_16X32 7
#define BLOCK_32X16 8
#define BLOCK_32X32 9
#define BLOCK_32X64 10
#define BLOCK_64X32 11
#define BLOCK_64X64 12
#define BLOCK_SIZES 13
#define BLOCK_INVALID BLOCK_SIZES
#define BLOCK_4X4 0
#define BLOCK_4X8 1
#define BLOCK_8X4 2
#define BLOCK_8X8 3
#define BLOCK_8X16 4
#define BLOCK_16X8 5
#define BLOCK_16X16 6
#define BLOCK_16X32 7
#define BLOCK_32X16 8
#define BLOCK_32X32 9
#define BLOCK_32X64 10
#define BLOCK_64X32 11
#define BLOCK_64X64 12
#if CONFIG_EXT_PARTITION
#define BLOCK_64X128 13
#define BLOCK_128X64 14
#define BLOCK_128X128 15
#define BLOCK_SIZES 16
#else
#define BLOCK_SIZES 13
#endif // CONFIG_EXT_PARTITION
#define BLOCK_INVALID (BLOCK_SIZES)
#define BLOCK_LARGEST (BLOCK_SIZES - 1)
typedef uint8_t BLOCK_SIZE;
typedef enum PARTITION_TYPE {
......
......@@ -22,9 +22,490 @@
#include "vp10/common/onyxc_int.h"
#endif // CONFIG_OBMC
// TODO(geza.lore) Update this when the extended coding unit size experiment
// has been ported.
#define CU_SIZE 64
#if CONFIG_EXT_INTER
// Maps the signed distance |m| of a pixel from the wedge edge to a blending
// weight in [0, 1 << WEDGE_WEIGHT_BITS]. The S-shaped ramp below makes the
// transition between the two blended predictors smooth; distances beyond the
// ramp saturate to the extreme weights.
static int get_masked_weight(int m) {
#define SMOOTHER_LEN 32
  static const uint8_t smoothfn[2 * SMOOTHER_LEN + 1] = {
      0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  1,  1,  1,
      1,  1,  2,  2,  3,  4,  5,  6,
      8,  9, 12, 14, 17, 21, 24, 28,
      32,
      36, 40, 43, 47, 50, 52, 55, 56,
      58, 59, 60, 61, 62, 62, 63, 63,
      63, 63, 63, 64, 64, 64, 64, 64,
      64, 64, 64, 64, 64, 64, 64, 64,
  };
  if (m < -SMOOTHER_LEN) return 0;
  if (m > SMOOTHER_LEN) return (1 << WEDGE_WEIGHT_BITS);
  return smoothfn[m + SMOOTHER_LEN];
}
// Master wedge masks from which all block-size specific masks are cut out
// in place. Oblique (diagonal) edges, indexed [negative][transpose][reverse];
// "negative" selects the complementary mask.
DECLARE_ALIGNED(16, static uint8_t,
                wedge_mask_obl[2][2][2][MASK_MASTER_SIZE * MASK_MASTER_SIZE]);
// Straight (axis-aligned) edges, indexed [negative][transpose].
DECLARE_ALIGNED(16, static uint8_t,
                wedge_mask_str[2][2][MASK_MASTER_SIZE * MASK_MASTER_SIZE]);
// Precomputes the master wedge masks (and their complements) from which all
// block-size specific wedge masks are later extracted in place by
// get_wedge_mask_inplace(). Must be called once before wedge prediction is
// used. Fix: declare the empty parameter list as (void) per C convention,
// and compute each smoothed weight once instead of twice per pixel.
void vp10_init_wedge_masks(void) {
  int i, j;
  const int w = MASK_MASTER_SIZE;
  const int h = MASK_MASTER_SIZE;
  const int stride = MASK_MASTER_STRIDE;
  // Master oblique edge parameters: m = (a[0]*x + a[1]*y) / 2 with the
  // origin at the mask center (a[2], a[3] select the center offsets).
  const int a[4] = {2, 1, 2, 2};
  for (i = 0; i < h; ++i)
    for (j = 0; j < w; ++j) {
      // Signed distances (in half-pel units) from the oblique edge (m) and
      // from the vertical edge (x).
      const int x = (2 * j + 1 - (a[2] * w) / 2);
      const int y = (2 * i + 1 - (a[3] * h) / 2);
      const int m = (a[0] * x + a[1] * y) / 2;
      const int w_obl = get_masked_weight(m);
      const int w_str = get_masked_weight(x);
      // Oblique master mask and its three mirrored/transposed symmetries,
      // plus the complementary ("negative") versions.
      wedge_mask_obl[0][0][0][i * stride + j] =
      wedge_mask_obl[0][1][0][j * stride + i] =
      wedge_mask_obl[0][0][1][i * stride + w - 1 - j] =
      wedge_mask_obl[0][1][1][(w - 1 - j) * stride + i] =
          w_obl;
      wedge_mask_obl[1][0][0][i * stride + j] =
      wedge_mask_obl[1][1][0][j * stride + i] =
      wedge_mask_obl[1][0][1][i * stride + w - 1 - j] =
      wedge_mask_obl[1][1][1][(w - 1 - j) * stride + i] =
          (1 << WEDGE_WEIGHT_BITS) - w_obl;
      // Straight master mask (vertical, and transposed for horizontal).
      wedge_mask_str[0][0][i * stride + j] =
      wedge_mask_str[0][1][j * stride + i] =
          w_str;
      wedge_mask_str[1][0][i * stride + j] =
      wedge_mask_str[1][1][j * stride + i] =
          (1 << WEDGE_WEIGHT_BITS) - w_str;
    }
}
// Returns a pointer into one of the master masks such that reading h rows of
// w columns with stride MASK_MASTER_STRIDE yields the wedge mask described by
// the parameter vector |a| = {dx, dy, x-offset/4, y-offset/4}.
static const uint8_t *get_wedge_mask_inplace(const int *a,
                                             int h, int w) {
  const int woff = (a[2] * w) >> 2;
  const int hoff = (a[3] * h) >> 2;
  // |dx| + |dy| == 3 only for the diagonal (1,2)/(2,1) edge slopes.
  const int oblique = (abs(a[0]) + abs(a[1]) == 3);
  const uint8_t *master;
  int negative, transpose, reverse;
  if (oblique) {
    negative = (a[0] < 0);
    transpose = (abs(a[0]) == 1);
    reverse = (a[0] < 0) ^ (a[1] < 0);
    master = wedge_mask_obl[negative][transpose][reverse];
  } else {
    negative = (a[0] < 0 || a[1] < 0);
    transpose = (a[0] == 0);
    master = wedge_mask_str[negative][transpose];
  }
  // Shift from the master-mask center to the configured edge offset.
  return master +
         MASK_MASTER_STRIDE * (MASK_MASTER_SIZE / 2 - hoff) +
         MASK_MASTER_SIZE / 2 - woff;
}
// Equation of line: f(x, y) = a[0]*(x - a[2]*w/4) + a[1]*(y - a[3]*h/4) = 0
// The soft mask is obtained by computing f(x, y) and then calling
// get_masked_weight(f(x, y)).
//
// Wedge parameters {a[0], a[1], a[2], a[3]} for WEDGE_BITS_SML block sizes:
// the eight oblique edge orientations through the block center.
static const int wedge_params_sml[1 << WEDGE_BITS_SML][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
};
// Wedge parameters for WEDGE_BITS_MED blocks taller than wide (h > w):
// the eight centered oblique edges plus vertically-offset variants.
static const int wedge_params_med_hgtw[1 << WEDGE_BITS_MED][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  {-1, 2, 2, 1},
  { 1, -2, 2, 1},
  {-1, 2, 2, 3},
  { 1, -2, 2, 3},
  {-1, -2, 2, 1},
  { 1, 2, 2, 1},
  {-1, -2, 2, 3},
  { 1, 2, 2, 3},
};
// Wedge parameters for WEDGE_BITS_MED blocks wider than tall (h < w):
// the eight centered oblique edges plus horizontally-offset variants.
static const int wedge_params_med_hltw[1 << WEDGE_BITS_MED][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  {-2, 1, 1, 2},
  { 2, -1, 1, 2},
  {-2, 1, 3, 2},
  { 2, -1, 3, 2},
  {-2, -1, 1, 2},
  { 2, 1, 1, 2},
  {-2, -1, 3, 2},
  { 2, 1, 3, 2},
};
// Wedge parameters for square WEDGE_BITS_MED blocks (h == w):
// the eight centered oblique edges plus offset straight edges.
static const int wedge_params_med_heqw[1 << WEDGE_BITS_MED][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  { 0, -2, 0, 1},
  { 0, 2, 0, 1},
  { 0, -2, 0, 3},
  { 0, 2, 0, 3},
  {-2, 0, 1, 0},
  { 2, 0, 1, 0},
  {-2, 0, 3, 0},
  { 2, 0, 3, 0},
};
// Wedge parameters for WEDGE_BITS_BIG blocks taller than wide (h > w):
// centered and offset oblique edges plus offset straight edges.
static const int wedge_params_big_hgtw[1 << WEDGE_BITS_BIG][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  {-1, 2, 2, 1},
  { 1, -2, 2, 1},
  {-1, 2, 2, 3},
  { 1, -2, 2, 3},
  {-1, -2, 2, 1},
  { 1, 2, 2, 1},
  {-1, -2, 2, 3},
  { 1, 2, 2, 3},
  {-2, 1, 1, 2},
  { 2, -1, 1, 2},
  {-2, 1, 3, 2},
  { 2, -1, 3, 2},
  {-2, -1, 1, 2},
  { 2, 1, 1, 2},
  {-2, -1, 3, 2},
  { 2, 1, 3, 2},
  { 0, -2, 0, 1},
  { 0, 2, 0, 1},
  { 0, -2, 0, 2},
  { 0, 2, 0, 2},
  { 0, -2, 0, 3},
  { 0, 2, 0, 3},
  {-2, 0, 2, 0},
  { 2, 0, 2, 0},
};
// Wedge parameters for WEDGE_BITS_BIG blocks wider than tall (h < w):
// centered and offset oblique edges plus offset straight edges.
static const int wedge_params_big_hltw[1 << WEDGE_BITS_BIG][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  {-1, 2, 2, 1},
  { 1, -2, 2, 1},
  {-1, 2, 2, 3},
  { 1, -2, 2, 3},
  {-1, -2, 2, 1},
  { 1, 2, 2, 1},
  {-1, -2, 2, 3},
  { 1, 2, 2, 3},
  {-2, 1, 1, 2},
  { 2, -1, 1, 2},
  {-2, 1, 3, 2},
  { 2, -1, 3, 2},
  {-2, -1, 1, 2},
  { 2, 1, 1, 2},
  {-2, -1, 3, 2},
  { 2, 1, 3, 2},
  { 0, -2, 0, 2},
  { 0, 2, 0, 2},
  {-2, 0, 1, 0},
  { 2, 0, 1, 0},
  {-2, 0, 2, 0},
  { 2, 0, 2, 0},
  {-2, 0, 3, 0},
  { 2, 0, 3, 0},
};
// Wedge parameters for square WEDGE_BITS_BIG blocks (h == w):
// centered and offset oblique edges plus offset straight edges.
static const int wedge_params_big_heqw[1 << WEDGE_BITS_BIG][4] = {
  {-1, 2, 2, 2},
  { 1, -2, 2, 2},
  {-2, 1, 2, 2},
  { 2, -1, 2, 2},
  {-2, -1, 2, 2},
  { 2, 1, 2, 2},
  {-1, -2, 2, 2},
  { 1, 2, 2, 2},
  {-1, 2, 2, 1},
  { 1, -2, 2, 1},
  {-1, 2, 2, 3},
  { 1, -2, 2, 3},
  {-1, -2, 2, 1},
  { 1, 2, 2, 1},
  {-1, -2, 2, 3},
  { 1, 2, 2, 3},
  {-2, 1, 1, 2},
  { 2, -1, 1, 2},
  {-2, 1, 3, 2},
  { 2, -1, 3, 2},
  {-2, -1, 1, 2},
  { 2, 1, 1, 2},
  {-2, -1, 3, 2},
  { 2, 1, 3, 2},
  { 0, -2, 0, 1},
  { 0, 2, 0, 1},
  { 0, -2, 0, 3},
  { 0, 2, 0, 3},
  {-2, 0, 1, 0},
  { 2, 0, 1, 0},
  {-2, 0, 3, 0},
  { 2, 0, 3, 0},
};
// Looks up the wedge parameter vector for |wedge_index| given the block size
// class and the block dimensions (h x w select the shape-specific table).
// Returns NULL for WEDGE_NONE.
static const int *get_wedge_params(int wedge_index,
                                   BLOCK_SIZE sb_type,
                                   int h, int w) {
  const int wedge_bits = get_wedge_bits(sb_type);

  if (wedge_index == WEDGE_NONE) return NULL;

  switch (wedge_bits) {
    case WEDGE_BITS_SML:
      return wedge_params_sml[wedge_index];
    case WEDGE_BITS_MED:
      if (h > w) return wedge_params_med_hgtw[wedge_index];
      if (h < w) return wedge_params_med_hltw[wedge_index];
      return wedge_params_med_heqw[wedge_index];
    case WEDGE_BITS_BIG:
      if (h > w) return wedge_params_big_hgtw[wedge_index];
      if (h < w) return wedge_params_big_hltw[wedge_index];
      return wedge_params_big_heqw[wedge_index];
    default:
      assert(0);  // Wedge prediction is not available for this block size.
      return NULL;
  }
}
// Returns the soft wedge mask (stride MASK_MASTER_STRIDE) for the given
// wedge index and block dimensions, or NULL when wedge_index is WEDGE_NONE.
const uint8_t *vp10_get_soft_mask(int wedge_index,
                                  BLOCK_SIZE sb_type,
                                  int h, int w) {
  const int *a = get_wedge_params(wedge_index, sb_type, h, w);
  return a ? get_wedge_mask_inplace(a, h, w) : NULL;
}
#if CONFIG_SUPERTX
// SUPERTX variant of vp10_get_soft_mask(): derives the plane dimensions from
// the block size (chroma planes use half-size blocks) and shifts the returned
// mask pointer by the given wedge offsets. Returns NULL for WEDGE_NONE.
const uint8_t *get_soft_mask_extend(int wedge_index, int plane,
                                    BLOCK_SIZE sb_type,
                                    int wedge_offset_y,
                                    int wedge_offset_x) {
  const int subh = (plane ? 2 : 4) << b_height_log2_lookup[sb_type];
  const int subw = (plane ? 2 : 4) << b_width_log2_lookup[sb_type];
  const int *a = get_wedge_params(wedge_index, sb_type, subh, subw);

  if (!a) return NULL;
  return get_wedge_mask_inplace(a, subh, subw) -
         (wedge_offset_x + wedge_offset_y * MASK_MASTER_STRIDE);
}
// Blends the two predictors dst and dst2 in place into dst using the wedge
// soft mask: dst = round((dst * m + dst2 * (64 - m)) / 64) per pixel, where
// m is the mask weight in Q(WEDGE_WEIGHT_BITS).
static void build_masked_compound_extend(uint8_t *dst, int dst_stride,
                                         uint8_t *dst2, int dst2_stride,
                                         int plane,
                                         int wedge_index, BLOCK_SIZE sb_type,
                                         int wedge_offset_y, int wedge_offset_x,
                                         int h, int w) {
  const uint8_t *mask = get_soft_mask_extend(
      wedge_index, plane, sb_type, wedge_offset_y, wedge_offset_x);
  int row, col;
  for (row = 0; row < h; ++row) {
    for (col = 0; col < w; ++col) {
      const int m0 = mask[row * MASK_MASTER_STRIDE + col];
      const int m1 = (1 << WEDGE_WEIGHT_BITS) - m0;
      const int p0 = dst[row * dst_stride + col];
      const int p1 = dst2[row * dst2_stride + col];
      dst[row * dst_stride + col] =
          (p0 * m0 + p1 * m1 + (1 << (WEDGE_WEIGHT_BITS - 1))) >>
          WEDGE_WEIGHT_BITS;
    }
  }
}
#if CONFIG_VP9_HIGHBITDEPTH
static void build_masked_compound_extend_highbd(
uint8_t *dst_8, int dst_stride,
uint8_t *dst2_8, int dst2_stride, int plane,
int wedge_index, BLOCK_SIZE sb_type,
int wedge_offset_y, int wedge_offset_x,
int h, int w) {
int i, j;
const uint8_t *mask = get_soft_mask_extend(
wedge_index, plane, sb_type, wedge_offset_y, wedge_offset_x);
uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);