Commit f03907a2 authored by Yue Chen

Make ext_inter/wedge/compound_segment/interintra on by default

(1) Make unit tests for masked sad/variance encoder-only
(2) Fix compile error with intrabc
(3) Fix warnings reported by static analysis

Change-Id: I0cd2176fcda0b81e1fc30283767678376ced4c42
parent 17c37ceb
@@ -2981,13 +2981,13 @@ void av1_build_inter_predictors_for_planes_single_buf(
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize =
         get_plane_block_size(bsize, &xd->plane[plane]);
-    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
-    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+    const int bw = block_size_wide[plane_bsize];
+    const int bh = block_size_high[plane_bsize];
     if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8 && !CONFIG_CB4X4) {
       int x, y;
+      const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+      const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
       assert(bsize == BLOCK_8X8);
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
@@ -3094,11 +3094,11 @@ void av1_build_wedge_inter_predictor_from_buf(
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize =
         get_plane_block_size(bsize, &xd->plane[plane]);
-    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
-    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
     if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8 && !CONFIG_CB4X4) {
       int x, y;
+      const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+      const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
       assert(bsize == BLOCK_8X8);
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
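
Note: both hunks above are the same cleanup. The 4x4-block lookups feed only
the sub-8x8 branch, so they move into it (and the first hunk introduces bw/bh
for the common path), which is what silences the static-analysis report about
values stored but never read. A minimal sketch of the pattern, assuming
nothing beyond standard C (names here are illustrative):

  #include <assert.h>

  /* Declare values in the narrowest scope that reads them, so no store is
   * dead when the branch is skipped or compiled out. */
  static int count_sub_blocks(int sub8x8, int num_4x4_w, int num_4x4_h) {
    int total = 1; /* the >= 8x8 path treats the block as a whole */
    if (sub8x8) {
      int x, y;
      total = 0;
      for (y = 0; y < num_4x4_h; ++y)
        for (x = 0; x < num_4x4_w; ++x) ++total;
      assert(total == num_4x4_w * num_4x4_h);
    }
    return total;
  }
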
@@ -1029,7 +1029,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
   av1_find_mv_refs(cm, xd, mi, INTRA_FRAME, &xd->ref_mv_count[INTRA_FRAME],
                    xd->ref_mv_stack[INTRA_FRAME],
 #if CONFIG_EXT_INTER
-                   compound_inter_mode_ctx,
+                   NULL,
 #endif  // CONFIG_EXT_INTER
                    ref_mvs, mi_row, mi_col, NULL, NULL, inter_mode_ctx);
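
Note: intra frames never code compound inter modes, so the CONFIG_EXT_INTER
context argument is unused on this path; passing NULL makes that explicit and
is presumably the intrabc-related compile fix from the commit message. A
hedged sketch of the callee-side contract this relies on (name and signature
invented for illustration, not the real av1_find_mv_refs prototype):

  #include <stddef.h>
  #include <stdint.h>

  /* A NULL context pointer means "caller does not want compound inter mode
   * contexts"; the callee must check before writing through it. */
  static void find_mv_refs_sketch(int16_t *compound_mode_ctx) {
    if (compound_mode_ctx != NULL) {
      *compound_mode_ctx = 0; /* derive contexts only when requested */
    }
  }
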
@@ -7251,7 +7251,6 @@ static void compound_single_motion_search(
   struct macroblockd_plane *const pd = &xd->plane[0];
   struct buf_2d backup_yv12[MAX_MB_PLANE];
-  int last_besterr = INT_MAX;
   const YV12_BUFFER_CONFIG *const scaled_ref_frame =
       av1_get_scaled_ref_frame(cpi, ref);
@@ -7355,10 +7354,7 @@ static void compound_single_motion_search(
   // Restore the pointer to the first (possibly scaled) prediction buffer.
   if (ref_idx) pd->pre[0] = orig_yv12;
-  if (bestsme < last_besterr) {
-    *this_mv = *best_mv;
-    last_besterr = bestsme;
-  }
+  if (bestsme < INT_MAX) *this_mv = *best_mv;
   *rate_mv = 0;
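
Note: after this refactor nothing updates last_besterr between its
initialization to INT_MAX and the test above, so the comparison reduces to
checking that the search produced any finite error at all. The simplified
contract, as a self-contained sketch (MV and the helper are illustrative):

  #include <limits.h>

  typedef struct { int row, col; } MV;

  /* Commit the best MV only if the search returned a finite error value. */
  static void commit_mv(int bestsme, const MV *best_mv, MV *this_mv) {
    if (bestsme < INT_MAX) *this_mv = *best_mv;
  }
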
@@ -7484,7 +7480,7 @@ static int estimate_wedge_sign(const AV1_COMP *cpi, const MACROBLOCK *x,
   const int f_index = bsize - BLOCK_8X8;
   const int bw = block_size_wide[bsize];
   const int bh = block_size_high[bsize];
-  uint32_t esq[2][4], var;
+  uint32_t esq[2][4];
   int64_t tl, br;
 #if CONFIG_HIGHBITDEPTH
@@ -7494,23 +7490,22 @@ static int estimate_wedge_sign(const AV1_COMP *cpi, const MACROBLOCK *x,
   }
 #endif  // CONFIG_HIGHBITDEPTH
-  var = cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]);
-  var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2,
-                                stride0, &esq[0][1]);
-  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
-                                pred0 + bh / 2 * stride0, stride0, &esq[0][2]);
-  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
-                                pred0 + bh / 2 * stride0 + bw / 2, stride0,
-                                &esq[0][3]);
-  var = cpi->fn_ptr[f_index].vf(src, src_stride, pred1, stride1, &esq[1][0]);
-  var = cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred1 + bw / 2,
-                                stride1, &esq[1][1]);
-  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
-                                pred1 + bh / 2 * stride1, stride0, &esq[1][2]);
-  var = cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
-                                pred1 + bh / 2 * stride1 + bw / 2, stride0,
-                                &esq[1][3]);
-  (void)var;
+  cpi->fn_ptr[f_index].vf(src, src_stride, pred0, stride0, &esq[0][0]);
+  cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred0 + bw / 2, stride0,
+                          &esq[0][1]);
+  cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
+                          pred0 + bh / 2 * stride0, stride0, &esq[0][2]);
+  cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
+                          pred0 + bh / 2 * stride0 + bw / 2, stride0,
+                          &esq[0][3]);
+  cpi->fn_ptr[f_index].vf(src, src_stride, pred1, stride1, &esq[1][0]);
+  cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, pred1 + bw / 2, stride1,
+                          &esq[1][1]);
+  cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride, src_stride,
+                          pred1 + bh / 2 * stride1, stride0, &esq[1][2]);
+  cpi->fn_ptr[f_index].vf(src + bh / 2 * src_stride + bw / 2, src_stride,
+                          pred1 + bh / 2 * stride1 + bw / 2, stride0,
+                          &esq[1][3]);
   tl = (int64_t)(esq[0][0] + esq[0][1] + esq[0][2]) -
        (int64_t)(esq[1][0] + esq[1][1] + esq[1][2]);
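
Note: estimate_wedge_sign measures each predictor's squared error on the four
(bw/2 x bh/2) quadrants of the block and compares how that error mass is
distributed to pick the wedge sign. The vf() return value (the variance) was
never read, hence the dead-store warning; the fix drops var and the (void)
cast and keeps only the esq outputs. One oddity is carried over verbatim: the
bottom-half pred1 calls still pass stride0 where stride1 would be expected,
which looks like a pre-existing slip rather than something this cleanup
introduced. Quadrant addressing, as a sketch (helper name invented):

  #include <stdint.h>

  /* Origin of each (bw/2 x bh/2) quadrant inside a bw x bh block. */
  static const uint8_t *quadrant(const uint8_t *buf, int stride, int bw,
                                 int bh, int right, int bottom) {
    return buf + (bottom ? (bh / 2) * stride : 0) + (right ? bw / 2 : 0);
  }
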
@@ -9082,7 +9077,8 @@ static int64_t handle_inter_mode(
       av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
       model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
                       &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
-      rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
+      rd =
+          RDCOST(x->rdmult, x->rddiv, tmp_rate_mv + rate_sum + rmode, dist_sum);
       if (rd < best_interintra_rd) {
         best_interintra_rd = rd;
         best_interintra_mode = mbmi->interintra_mode;
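
Note: the interintra RD cost now charges rmode, the rate of signaling the
chosen interintra mode, in place of the rs term, so each candidate is
compared on its full signaling cost. For orientation, RDCOST folds rate and
distortion into one scalar; a stand-in sketch (the shift constants here are
assumptions, not the real macro):

  #include <stdint.h>

  /* Lambda-weighted rate plus scaled distortion, rounded. */
  static int64_t rdcost_sketch(int rdmult, int rddiv, int rate, int64_t dist) {
    return (((int64_t)rate * rdmult + 256) >> 9) + (dist << rddiv);
  }
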
@@ -9113,7 +9109,7 @@ static int64_t handle_inter_mode(
       if (rd != INT64_MAX)
         rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_mv + rwedge + rate_sum,
                     dist_sum);
-      best_interintra_rd_nowedge = rd;
+      best_interintra_rd_nowedge = best_interintra_rd;
       // Disable wedge search if source variance is small
       if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
@@ -9143,9 +9139,7 @@ static int64_t handle_inter_mode(
                         &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
         rd = RDCOST(x->rdmult, x->rddiv,
                     rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
-        if (rd < best_interintra_rd_wedge) {
-          best_interintra_rd_wedge = rd;
-        } else {
+        if (rd >= best_interintra_rd_wedge) {
           tmp_mv.as_int = cur_mv[0].as_int;
           tmp_rate_mv = rate_mv;
         }
@@ -9165,18 +9159,15 @@ static int64_t handle_inter_mode(
         best_interintra_rd_wedge = rd;
         if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
           mbmi->use_wedge_interintra = 1;
-          best_interintra_rd = best_interintra_rd_wedge;
           mbmi->mv[0].as_int = tmp_mv.as_int;
           rd_stats->rate += tmp_rate_mv - rate_mv;
           rate_mv = tmp_rate_mv;
         } else {
           mbmi->use_wedge_interintra = 0;
-          best_interintra_rd = best_interintra_rd_nowedge;
           mbmi->mv[0].as_int = cur_mv[0].as_int;
         }
       } else {
         mbmi->use_wedge_interintra = 0;
-        best_interintra_rd = best_interintra_rd_nowedge;
       }
     }
 #endif  // CONFIG_WEDGE
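
Note: the dropped assignments to best_interintra_rd leave the value chosen by
the interintra mode loop above as the reference, instead of overwriting it
per branch; the wedge/no-wedge decision itself is a strict RD comparison.
Distilled into a sketch:

  #include <stdint.h>

  /* Wedge interintra wins only on a strict RD improvement over no-wedge. */
  static int use_wedge_interintra(int64_t rd_wedge, int64_t rd_nowedge) {
    return rd_wedge < rd_nowedge;
  }
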
@@ -491,6 +491,10 @@ post_process_cmdline() {
   soft_enable ec_smallmul
   soft_enable var_tx
   soft_enable one_sided_compound
+  soft_enable ext_inter
+  soft_enable wedge
+  soft_enable compound_segment
+  soft_enable interintra
   # Backwards/jenkins compatibility with --enable-aom-highbitdepth
   enabled aom_highbitdepth && enable_feature highbitdepth
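
Note: soft_enable turns a feature on only when the user has not already
decided it, so an explicit --disable-ext-inter (or any of the other three) on
the configure command line still wins over these new defaults. The resolution
rule, sketched in C for consistency with the rest of this page (the real
helper is shell):

  /* An explicit user setting overrides the soft default. */
  static int resolve_flag(int user_set, int user_value, int soft_default) {
    return user_set ? user_value : soft_default;
  }
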
@@ -89,7 +89,6 @@ TEST_P(MaskedSubPixelVarianceTest, OperationCheck) {
       msk_ptr[j] = rnd(65);
     }
     for (int k = 0; k < 3; k++) {
-      xoffset = xoffsets[k];
       for (int l = 0; l < 3; l++) {
         xoffset = xoffsets[k];
         yoffset = yoffsets[l];
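
Note: the removed line was a dead store flagged by static analysis; the inner
loop reassigns xoffset from the same xoffsets[k] before any read, so only the
assignment inside the inner loop ever matters.
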
@@ -192,8 +192,8 @@ LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += blend_a64_mask_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += blend_a64_mask_1d_test.cc
 ifeq ($(CONFIG_EXT_INTER),yes)
-LIBAOM_TEST_SRCS-$(HAVE_SSSE3) += masked_variance_test.cc
-LIBAOM_TEST_SRCS-$(HAVE_SSSE3) += masked_sad_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += masked_variance_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += masked_sad_test.cc
 LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_wedge_utils_test.cc
 endif
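
Note: keying these tests off CONFIG_AV1_ENCODER instead of HAVE_SSSE3 does
two things at once: the masked SAD/variance tests drop out of decoder-only
builds (item 1 of the commit message), and they now run against the C
reference implementations even on machines without SSSE3.
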
......