Commit 0ead613b authored by Sebastien Alaiwan

Remove NCOBMC experiment

This experiment has been abandoned for AV1.

Change-Id: I3a6dfd2178aff2502d0ea7e9dd6d584b4db0eb1f
parent aa1d2f10
@@ -22,11 +22,7 @@ extern "C" {
#undef MAX_SB_SIZE
#if CONFIG_NCOBMC
#define NC_MODE_INFO 1
#else
#define NC_MODE_INFO 0
#endif
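// Annotation (editorial assumption, not part of the original change):
// NC_MODE_INFO appears to gate the extra mode-info bookkeeping that
// non-causal OBMC needed in order to inspect bottom/right neighbors; with
// CONFIG_NCOBMC gone it is pinned to 0.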
// Max superblock size
#if CONFIG_EXT_PARTITION
......
@@ -1462,49 +1462,6 @@ const uint8_t *av1_get_obmc_mask(int length) {
  }
}
#if CONFIG_NCOBMC
// obmc_mask_flipN[overlap_position]
static const uint8_t obmc_mask_flip1[1] = { 55 };
static const uint8_t obmc_mask_flip2[2] = { 62, 45 };
static const uint8_t obmc_mask_flip4[4] = { 64, 59, 50, 39 };
static const uint8_t obmc_mask_flip8[8] = { 64, 63, 61, 57, 53, 48, 42, 36 };
static const uint8_t obmc_mask_flip16[16] = { 64, 64, 64, 63, 61, 60, 58, 56,
                                              54, 52, 49, 46, 43, 40, 37, 34 };
static const uint8_t obmc_mask_flip32[32] = { 64, 64, 64, 64, 64, 63, 63, 62,
                                              62, 61, 60, 60, 59, 58, 57, 56,
                                              55, 53, 52, 51, 50, 48, 47, 45,
                                              44, 43, 41, 40, 38, 36, 35, 33 };
#if CONFIG_EXT_PARTITION
static const uint8_t obmc_mask_flip64[64] = {
  64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 63, 63, 63, 63, 62, 62,
  62, 62, 62, 61, 60, 60, 60, 60, 60, 59, 58, 58, 57, 57, 56, 56,
  56, 55, 54, 53, 52, 52, 51, 51, 51, 50, 49, 48, 47, 47, 46, 45,
  44, 44, 44, 43, 42, 41, 40, 40, 39, 38, 37, 36, 35, 35, 34, 33,
};
#endif  // CONFIG_EXT_PARTITION
const uint8_t *av1_get_obmc_mask_flipped(int length) {
  switch (length) {
    case 1: return obmc_mask_flip1;
    case 2: return obmc_mask_flip2;
    case 4: return obmc_mask_flip4;
    case 8: return obmc_mask_flip8;
    case 16: return obmc_mask_flip16;
    case 32: return obmc_mask_flip32;
#if CONFIG_EXT_PARTITION
    case 64: return obmc_mask_flip64;
#endif  // CONFIG_EXT_PARTITION
    default: assert(0); return NULL;
  }
}
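// Annotation (not part of the original source): these 6-bit masks feed the
// aom_blend_a64_* helpers, which combine two predictors per sample as
//   dst = (mask * src0 + (64 - mask) * src1 + 32) >> 6.
// In av1_merge_dst_bottom_right_preds() below, src0 is the current
// prediction and src1 the bottom/right neighbor's, so with these flipped
// tables the neighbor's contribution grows toward the shared block edge.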
#endif // CONFIG_NCOBMC
static INLINE void increment_int_ptr(MACROBLOCKD *xd, int rel_mi_rc,
                                     uint8_t mi_hw, MODE_INFO *mi,
                                     void *fun_ctxt) {
@@ -1905,324 +1862,6 @@ void av1_build_obmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                  dst_buf2, dst_stride2);
}
#if CONFIG_NCOBMC
void av1_build_prediction_by_bottom_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          uint8_t *tmp_buf[MAX_MB_PLANE],
                                          int tmp_width[MAX_MB_PLANE],
                                          int tmp_height[MAX_MB_PLANE],
                                          int tmp_stride[MAX_MB_PLANE]) {
  const TileInfo *const tile = &xd->tile;
#if CONFIG_DEBUG
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
#endif
  int i, j, mi_step, ref;
  const int ilimit = AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
  int mb_to_right_edge_base = xd->mb_to_right_edge;
  if (mi_row + xd->n8_h >= tile->mi_row_end ||
      (mi_row + xd->n8_h) % MI_SIZE == 0 || (mi_row + xd->n8_h) >= cm->mi_rows)
    return;
  assert(bsize >= BLOCK_8X8);
  xd->mb_to_top_edge -= xd->n8_h * 32;
  for (i = 0; i < ilimit; i += mi_step) {
    int mi_row_offset = xd->n8_h;
    int mi_col_offset = i;
    int mi_x, mi_y, bw, bh;
    MODE_INFO *mi = xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride];
    MB_MODE_INFO *mbmi = &mi->mbmi;
    MB_MODE_INFO backup_mbmi;
    mi_step = AOMMIN(xd->n8_w, mi_size_wide[mbmi->sb_type]);
    if (!is_neighbor_overlappable(mbmi)) continue;
    backup_mbmi = *mbmi;
    modify_neighbor_predictor_for_obmc(mbmi);
    for (j = 0; j < MAX_MB_PLANE; ++j) {
      struct macroblockd_plane *const pd = &xd->plane[j];
      setup_pred_plane(&pd->dst, AOMMAX(mbmi->sb_type, BLOCK_8X8), tmp_buf[j],
                       tmp_width[j], tmp_height[j], tmp_stride[j],
                       (xd->n8_h >> 1), i, NULL, pd->subsampling_x,
                       pd->subsampling_y);
    }
    for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
      const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
      const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
      xd->block_refs[ref] = ref_buf;
      if ((!av1_is_valid_scale(&ref_buf->sf)))
        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                           "Reference frame has invalid dimensions");
      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + (xd->n8_h >> 1),
                           mi_col + i, &ref_buf->sf);
    }
    xd->mb_to_left_edge = -(((mi_col + i) * MI_SIZE) * 8);
    xd->mb_to_right_edge =
        mb_to_right_edge_base + (xd->n8_w - i - mi_step) * 64;
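    // Annotation (not part of the original source): the mb_to_*_edge offsets
    // are kept in 1/8-pel units, hence the repeated factors of 8 in these
    // conversions from mi positions to edge distances.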
    mi_x = (mi_col + i) << MI_SIZE_LOG2;
    mi_y = (mi_row << MI_SIZE_LOG2) + xd->n8_h * (MI_SIZE >> 1);
    for (j = 0; j < MAX_MB_PLANE; ++j) {
      const struct macroblockd_plane *pd = &xd->plane[j];
      bw = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_x;
      bh = (xd->n8_h << (MI_SIZE_LOG2 - 1)) >> pd->subsampling_y;
      build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh, 0,
                             xd->n8_h == 1 ? (4 >> pd->subsampling_y) : 0, bw,
                             bh, mi_x, mi_y);
    }
    *mbmi = backup_mbmi;
  }
  xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
  xd->mb_to_right_edge = mb_to_right_edge_base;
  xd->mb_to_top_edge += xd->n8_h * 32;
}
void av1_build_prediction_by_right_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                         int mi_row, int mi_col,
                                         uint8_t *tmp_buf[MAX_MB_PLANE],
                                         int tmp_width[MAX_MB_PLANE],
                                         int tmp_height[MAX_MB_PLANE],
                                         const int tmp_stride[MAX_MB_PLANE]) {
  const TileInfo *const tile = &xd->tile;
#if CONFIG_DEBUG
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
#endif
  int i, j, mi_step, ref;
  const int ilimit = AOMMIN(xd->n8_h, cm->mi_rows - mi_row);
  int mb_to_bottom_edge_base = xd->mb_to_bottom_edge;
  if (mi_col + xd->n8_w >= tile->mi_col_end ||
      (mi_col + xd->n8_w) % MI_SIZE == 0 || (mi_col + xd->n8_w) >= cm->mi_cols)
    return;
  assert(bsize >= BLOCK_8X8);
  xd->mb_to_left_edge -= xd->n8_w / 2 * MI_SIZE * 8;
  for (i = 0; i < ilimit; i += mi_step) {
    int mi_row_offset = i;
    int mi_col_offset = xd->n8_w;
    int mi_x, mi_y, bw, bh;
    MODE_INFO *mi = xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride];
    MB_MODE_INFO *mbmi = &mi->mbmi;
    MB_MODE_INFO backup_mbmi;
    mi_step = AOMMIN(xd->n8_h, mi_size_high[mbmi->sb_type]);
    if (!is_neighbor_overlappable(mbmi)) continue;
    backup_mbmi = *mbmi;
    modify_neighbor_predictor_for_obmc(mbmi);
    for (j = 0; j < MAX_MB_PLANE; ++j) {
      struct macroblockd_plane *const pd = &xd->plane[j];
      setup_pred_plane(&pd->dst, AOMMAX(mbmi->sb_type, BLOCK_8X8), tmp_buf[j],
                       tmp_width[j], tmp_height[j], tmp_stride[j], i,
                       xd->n8_w >> 1, NULL, pd->subsampling_x,
                       pd->subsampling_y);
    }
    for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
      const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
      const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
      xd->block_refs[ref] = ref_buf;
      if ((!av1_is_valid_scale(&ref_buf->sf)))
        aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                           "Reference frame has invalid dimensions");
      av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row + i,
                           mi_col + (xd->n8_w >> 1), &ref_buf->sf);
    }
    xd->mb_to_top_edge = -(((mi_row + i) * MI_SIZE) * 8);
    xd->mb_to_bottom_edge =
        mb_to_bottom_edge_base + (xd->n8_h - i - mi_step) * MI_SIZE * 8;
    mi_x = (mi_col << MI_SIZE_LOG2) + xd->n8_w * (MI_SIZE >> 1);
    mi_y = (mi_row + i) << MI_SIZE_LOG2;
    for (j = 0; j < MAX_MB_PLANE; ++j) {
      const struct macroblockd_plane *pd = &xd->plane[j];
      bw = (xd->n8_w << (MI_SIZE_LOG2 - 1)) >> pd->subsampling_x;
      bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y;
      build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh,
                             xd->n8_w == 1 ? 4 >> pd->subsampling_x : 0, 0, bw,
                             bh, mi_x, mi_y);
    }
    *mbmi = backup_mbmi;
  }
  xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
  xd->mb_to_bottom_edge = mb_to_bottom_edge_base;
  xd->mb_to_left_edge += xd->n8_w / 2 * MI_SIZE * 8;
}
// This function combines the motion-compensated predictions generated by the
// bottom/right neighboring blocks' inter predictors with the prediction
// already in the dst buffer.
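// Worked example (editorial, assuming the standard lookup tables): for a
// BLOCK_32X32, num_4x4_blocks_high_lookup[bsize] is 8, so the vertical
// overlap below is 8 << 1 = 16 luma rows, i.e. the blend covers the half of
// the block nearest the bottom neighbor.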
void av1_merge_dst_bottom_right_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      uint8_t *bottom[MAX_MB_PLANE],
                                      const int bottom_stride[MAX_MB_PLANE],
                                      uint8_t *right[MAX_MB_PLANE],
                                      const int right_stride[MAX_MB_PLANE]) {
  const TileInfo *const tile = &xd->tile;
  BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  int plane, i, mi_step;
  const int bottom_available = mi_row + xd->n8_h < tile->mi_row_end &&
                               (mi_row + xd->n8_h) % MI_SIZE != 0 &&
                               (mi_row + xd->n8_h) < cm->mi_rows;
#if CONFIG_HIGHBITDEPTH
  int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
#endif  // CONFIG_HIGHBITDEPTH
  // handle bottom row
  for (i = 0; bottom_available && i < AOMMIN(xd->n8_w, cm->mi_cols - mi_col);
       i += mi_step) {
    int mi_row_offset = xd->n8_h;
    int mi_col_offset = i;
    MODE_INFO *mi = xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride];
    MB_MODE_INFO *mbmi = &mi->mbmi;
    int overlap;
    mi_step = AOMMIN(xd->n8_w, mi_size_wide[mbmi->sb_type]);
    if (!is_neighbor_overlappable(mbmi)) continue;
    overlap = num_4x4_blocks_high_lookup[bsize] << 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
      const struct macroblockd_plane *pd = &xd->plane[plane];
      const int bw = (mi_step * MI_SIZE) >> pd->subsampling_x;
      const int bh = overlap >> pd->subsampling_y;
      const int dst_stride = pd->dst.stride;
      uint8_t *dst =
          &pd->dst.buf[((i * MI_SIZE) >> pd->subsampling_x) +
                       (((xd->n8_h * MI_SIZE - overlap) * dst_stride) >>
                        pd->subsampling_y)];
      const int tmp_stride = bottom_stride[plane];
      const uint8_t *const tmp =
          &bottom[plane][((i * MI_SIZE) >> pd->subsampling_x) +
                         (((xd->n8_h * MI_SIZE - overlap) * tmp_stride) >>
                          pd->subsampling_y)];
      const uint8_t *const mask = av1_get_obmc_mask_flipped(bh);
#if CONFIG_HIGHBITDEPTH
      if (is_hbd)
        aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                   tmp_stride, mask, bh, bw, xd->bd);
      else
#endif  // CONFIG_HIGHBITDEPTH
        aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride,
                            mask, bh, bw);
    }
  }  // each mi in the bottom row
  // handle right column
  if (mi_col + xd->n8_w >= tile->mi_col_end ||
      (mi_col + xd->n8_w) % MI_SIZE == 0 || (mi_col + xd->n8_w) >= cm->mi_cols)
    return;
  for (i = 0; i < AOMMIN(xd->n8_h, cm->mi_rows - mi_row); i += mi_step) {
    int mi_row_offset = i;
    int mi_col_offset = xd->n8_w;
    int overlap;
    MODE_INFO *mi = xd->mi[mi_col_offset + mi_row_offset * xd->mi_stride];
    MB_MODE_INFO *mbmi = &mi->mbmi;
    mi_step = AOMMIN(xd->n8_h, mi_size_high[mbmi->sb_type]);
    if (!is_neighbor_overlappable(mbmi)) continue;
    overlap = num_4x4_blocks_wide_lookup[bsize] << 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
      const struct macroblockd_plane *pd = &xd->plane[plane];
      const int bw = overlap >> pd->subsampling_x;
      const int bh = (mi_step * MI_SIZE) >> pd->subsampling_y;
      const int dst_stride = pd->dst.stride;
      uint8_t *dst =
          &pd->dst.buf[((i * MI_SIZE * dst_stride) >> pd->subsampling_y) +
                       ((xd->n8_w * MI_SIZE - overlap) >> pd->subsampling_x)];
      const int tmp_stride = right_stride[plane];
      const uint8_t *const tmp =
          &right[plane][((i * MI_SIZE * tmp_stride) >> pd->subsampling_y) +
                        ((xd->n8_w * MI_SIZE - overlap) >> pd->subsampling_x)];
      const uint8_t *const mask = av1_get_obmc_mask_flipped(bw);
#if CONFIG_HIGHBITDEPTH
      if (is_hbd)
        aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                   tmp_stride, mask, bh, bw, xd->bd);
      else
#endif  // CONFIG_HIGHBITDEPTH
        aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride,
                            mask, bh, bw);
    }
  }  // each mi in the right column
}
// This function generates 4-sided OBMC in three steps: (1) compute the
// prediction blocks generated by the bottom and right neighbors' motion
// vectors; (2) combine them with the original prediction block (which must be
// pre-stored in xd->plane[].dst.buf before calling this function), updating
// the result in xd->plane[].dst.buf; (3) call the causal OBMC prediction
// function, which generates the left and above predictors and merges them
// with xd->plane[].dst.buf.
void av1_build_ncobmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col) {
#if CONFIG_HIGHBITDEPTH
  DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
  DECLARE_ALIGNED(16, uint8_t, tmp_buf2[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
#else
  DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_SB_SQUARE]);
  DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_SB_SQUARE]);
#endif  // CONFIG_HIGHBITDEPTH
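  // Annotation (not part of the original source): with CONFIG_HIGHBITDEPTH
  // the scratch buffers hold 16-bit samples, so they are declared at twice
  // the size and accessed through CONVERT_TO_BYTEPTR() below.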
  uint8_t *dst_buf1[MAX_MB_PLANE], *dst_buf2[MAX_MB_PLANE];
  int dst_stride1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_stride2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_width1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_width2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_height1[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
  int dst_height2[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
#if CONFIG_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    int len = sizeof(uint16_t);
    dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1);
    dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * len);
    dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_SB_SQUARE * 2 * len);
    dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2);
    dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * len);
    dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_SB_SQUARE * 2 * len);
  } else {
#endif  // CONFIG_HIGHBITDEPTH
    dst_buf1[0] = tmp_buf1;
    dst_buf1[1] = tmp_buf1 + MAX_SB_SQUARE;
    dst_buf1[2] = tmp_buf1 + MAX_SB_SQUARE * 2;
    dst_buf2[0] = tmp_buf2;
    dst_buf2[1] = tmp_buf2 + MAX_SB_SQUARE;
    dst_buf2[2] = tmp_buf2 + MAX_SB_SQUARE * 2;
#if CONFIG_HIGHBITDEPTH
  }
#endif  // CONFIG_HIGHBITDEPTH
  const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
  av1_build_prediction_by_bottom_preds(cm, xd, mi_row, mi_col, dst_buf1,
                                       dst_width1, dst_height1, dst_stride1);
  av1_build_prediction_by_right_preds(cm, xd, mi_row, mi_col, dst_buf2,
                                      dst_width2, dst_height2, dst_stride2);
  av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
                       mi_col);
  av1_merge_dst_bottom_right_preds(cm, xd, mi_row, mi_col, dst_buf1,
                                   dst_stride1, dst_buf2, dst_stride2);
  av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
                       mi_col);
  av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
  av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
                       mi_col);
}
#endif // CONFIG_NCOBMC
/* clang-format off */
#if CONFIG_EXT_PARTITION
static const int ii_weights1d[MAX_SB_SIZE] = {
......
@@ -505,10 +505,6 @@ void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                        int tmp_stride[MAX_MB_PLANE]);
void av1_build_obmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                        int mi_row, int mi_col);
#if CONFIG_NCOBMC
void av1_build_ncobmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col);
#endif
#define MASK_MASTER_SIZE ((MAX_WEDGE_SIZE) << 1)
#define MASK_MASTER_STRIDE (MASK_MASTER_SIZE)
......
@@ -498,11 +498,7 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
    av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize);
    if (mbmi->motion_mode == OBMC_CAUSAL) {
#if CONFIG_NCOBMC
      av1_build_ncobmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
#else
      av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
#endif
    }
    // Reconstruction
    if (!mbmi->skip) {
......
@@ -1464,12 +1464,9 @@ static void encode_b(const AV1_COMP *const cpi, TileDataEnc *tile_data,
                     PICK_MODE_CONTEXT *ctx, int *rate) {
  TileInfo *const tile = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
#if CONFIG_NCOBMC || CONFIG_EXT_DELTA_Q
#if CONFIG_EXT_DELTA_Q
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
#if CONFIG_NCOBMC
  int check_ncobmc;
#endif
#endif
  set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
@@ -1477,24 +1474,6 @@ static void encode_b(const AV1_COMP *const cpi, TileDataEnc *tile_data,
  x->e_mbd.mi[0]->mbmi.partition = partition;
#endif
  update_state(cpi, tile_data, td, ctx, mi_row, mi_col, bsize, dry_run);
#if CONFIG_NCOBMC
  mbmi = &xd->mi[0]->mbmi;
  set_ref_ptrs(&cpi->common, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
#endif
#if CONFIG_NCOBMC
  const MOTION_MODE motion_allowed =
      motion_mode_allowed(0, xd->global_motion, xd, xd->mi[0]);
#endif
#if CONFIG_NCOBMC
  check_ncobmc = is_inter_block(mbmi) && motion_allowed >= OBMC_CAUSAL;
  if (!dry_run && check_ncobmc) {
    av1_check_ncobmc_rd(cpi, x, mi_row, mi_col);
    av1_setup_dst_planes(x->e_mbd.plane, bsize,
                         get_frame_new_buffer(&cpi->common), mi_row, mi_col);
  }
#endif
#if CONFIG_LV_MAP
  av1_set_coeff_buffer(cpi, x, mi_row, mi_col);
@@ -4984,12 +4963,7 @@ static void encode_superblock(const AV1_COMP *const cpi, TileDataEnc *tile_data,
    av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, block_size);
    if (mbmi->motion_mode == OBMC_CAUSAL) {
#if CONFIG_NCOBMC
      if (dry_run == OUTPUT_ENABLED)
        av1_build_ncobmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
      else
#endif
        av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
      av1_build_obmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
    }
    av1_encode_sb((AV1_COMMON *)cm, x, block_size, mi_row, mi_col);
......
@@ -11226,127 +11226,3 @@ static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x,
#endif  // CONFIG_HIGHBITDEPTH
  }
}
#if CONFIG_NCOBMC
void av1_check_ncobmc_rd(const struct AV1_COMP *cpi, struct macroblock *x,
                         int mi_row, int mi_col) {
  const AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  MB_MODE_INFO backup_mbmi;
  BLOCK_SIZE bsize = mbmi->sb_type;
  int ref, skip_blk, backup_skip = x->skip;
  int64_t rd_causal;
  RD_STATS rd_stats_y, rd_stats_uv;
  const int skip_ctx = av1_get_skip_context(xd);
  int rate_skip0 = x->skip_cost[skip_ctx][0];
  int rate_skip1 = x->skip_cost[skip_ctx][1];
  // Recompute the best causal predictor and rd
  mbmi->motion_mode = SIMPLE_TRANSLATION;
  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
  for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
    assert(cfg != NULL);
    av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                         &xd->block_refs[ref]->sf);
  }
  av1_setup_dst_planes(x->e_mbd.plane, bsize,
                       get_frame_new_buffer(&cpi->common), mi_row, mi_col);
  av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize);
  av1_subtract_plane(x, bsize, 0);
  if (cm->tx_mode == TX_MODE_SELECT && !xd->lossless[mbmi->segment_id]) {
    // ncobmc
    select_tx_type_yrd(cpi, x, &rd_stats_y, bsize, mi_row, mi_col, INT64_MAX);
  } else {
    int idx, idy;
    super_block_yrd(cpi, x, &rd_stats_y, bsize, INT64_MAX);
    for (idy = 0; idy < xd->n8_h; ++idy)
      for (idx = 0; idx < xd->n8_w; ++idx)
        mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
    memset(x->blk_skip[0], rd_stats_y.skip,
           sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
  }
  inter_block_uvrd(cpi, x, &rd_stats_uv, bsize, INT64_MAX, 0);
  assert(rd_stats_y.rate != INT_MAX && rd_stats_uv.rate != INT_MAX);
  if (rd_stats_y.skip && rd_stats_uv.skip) {
    rd_stats_y.rate = rate_skip1;
    rd_stats_uv.rate = 0;
    rd_stats_y.dist = rd_stats_y.sse;
    rd_stats_uv.dist = rd_stats_uv.sse;
    skip_blk = 0;
  } else if (RDCOST(x->rdmult,
                    (rd_stats_y.rate + rd_stats_uv.rate + rate_skip0),
                    (rd_stats_y.dist + rd_stats_uv.dist)) >
             RDCOST(x->rdmult, rate_skip1,
                    (rd_stats_y.sse + rd_stats_uv.sse))) {
    rd_stats_y.rate = rate_skip1;
    rd_stats_uv.rate = 0;
    rd_stats_y.dist = rd_stats_y.sse;
    rd_stats_uv.dist = rd_stats_uv.sse;
    skip_blk = 1;
  } else {
    rd_stats_y.rate += rate_skip0;
    skip_blk = 0;
  }
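  // Annotation (not part of the original source): the block above weighs
  // coding the residual (rate_skip0 plus coefficient rate, coded distortion)
  // against skipping it entirely (rate_skip1, SSE as distortion) and keeps
  // the cheaper option.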
  backup_skip = skip_blk;
  backup_mbmi = *mbmi;
  rd_causal = RDCOST(x->rdmult, (rd_stats_y.rate + rd_stats_uv.rate),
                     (rd_stats_y.dist + rd_stats_uv.dist));
  rd_causal +=
      RDCOST(x->rdmult, av1_cost_bit(cm->fc->motion_mode_prob[bsize][0], 0), 0);
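  // Annotation (not part of the original source): RDCOST() forms the usual
  // Lagrangian cost, roughly lambda * rate + distortion with lambda derived
  // from x->rdmult, so the causal and non-causal candidates can be compared
  // on one scale; the motion_mode_prob bit accounts for signaling the choice.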
  // Check non-causal mode
  mbmi->motion_mode = OBMC_CAUSAL;
  av1_build_ncobmc_inter_predictors_sb(cm, xd, mi_row, mi_col);
  av1_subtract_plane(x, bsize, 0);
  if (cm->tx_mode == TX_MODE_SELECT && !xd->lossless[mbmi->segment_id]) {
    // ncobmc
    select_tx_type_yrd(cpi, x, &rd_stats_y, bsize, mi_row, mi_col, INT64_MAX);
  } else {
    int idx, idy;
    super_block_yrd(cpi, x, &rd_stats_y, bsize, INT64_MAX);
    for (idy = 0; idy < xd->n8_h; ++idy)
      for (idx = 0; idx < xd->n8_w; ++idx)
        mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
    memset(x->blk_skip[0], rd_stats_y.skip,
           sizeof(uint8_t) * xd->n8_h * xd->n8_w * 4);
  }
  inter_block_uvrd(cpi, x, &rd_stats_uv, bsize, INT64_MAX, 0);
  assert(rd_stats_y.rate != INT_MAX && rd_stats_uv.rate != INT_MAX);
  if (rd_stats_y.skip && rd_stats_uv.skip) {
    rd_stats_y.rate = rate_skip1;
    rd_stats_uv.rate = 0;
    rd_stats_y.dist = rd_stats_y.sse;
    rd_stats_uv.dist = rd_stats_uv.sse;
    skip_blk = 0;
  } else if (RDCOST(x->rdmult,
                    (rd_stats_y.rate + rd_stats_uv.rate + rate_skip0),
                    (rd_stats_y.dist + rd_stats_uv.dist)) >
             RDCOST(x->rdmult, rate_skip1,
                    (rd_stats_y.sse + rd_stats_uv.sse))) {
    rd_stats_y.rate = rate_skip1;
    rd_stats_uv.rate = 0;
    rd_stats_y.dist = rd_stats_y.sse;
    rd_stats_uv.dist = rd_stats_uv.sse;
    skip_blk = 1;
  } else {
    rd_stats_y.rate += rate_skip0;
    skip_blk = 0;
  }
  if (rd_causal >
      RDCOST(x->rdmult,
             rd_stats_y.rate + rd_stats_uv.rate +
                 av1_cost_bit(cm->fc->motion_mode_prob[bsize][0], 1),
             (rd_stats_y.dist + rd_stats_uv.dist))) {
    x->skip = skip_blk;
  } else {
    *mbmi = backup_mbmi;
    x->skip = backup_skip;
  }
}
#endif // CONFIG_NCOBMC