Commit 907f88c4 authored by Yue Chen

Fixing a bug in obmc prediction in the rd loop

This bug made the rd loop use one-sided obmc (a compound of the current
predictor and the predictors of the left mi's, while the above ones were
ignored by mistake) to decide whether to use obmc. The fix improves
compression performance by ~0.6% on different test sets.

Coding gain (%) of the obmc experiment on derflr/derfhd/hevcmr/hevchd:
1.568/TBD/1.628/TBD

Change-Id: I43b239bedf9a8eebfd02315b1b036e140a998140
parent f9c25498
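For orientation before the hunks below: the prediction-side change is a per-pixel weighted blend in which the base (BMC) prediction is now read from dst in place, making the separate bmc pointer dead code. The masks are 6-bit weights that sum to 64, hence the +32 rounding and >> 6 normalization. A minimal, self-contained C sketch of that blend (obmc_blend_row_based, mask0, and mask1 are illustrative names, not identifiers from the tree):

#include <stdint.h>

/* Illustrative sketch of the row-masked OBMC blend preserved by the
 * hunks below; not the actual library function.
 * Invariant assumed: mask0[row] + mask1[row] == 64 for every row. */
static void obmc_blend_row_based(uint8_t *dst, int dst_stride,
                                 const uint8_t *tmp, int tmp_stride,
                                 const uint8_t *mask0, const uint8_t *mask1,
                                 int bw, int bh) {
  int row, col;
  for (row = 0; row < bh; ++row) {
    for (col = 0; col < bw; ++col)
      /* Weighted average with round-to-nearest: (w0*a + w1*b + 32) >> 6. */
      dst[col] = (mask0[row] * dst[col] + mask1[row] * tmp[col] + 32) >> 6;
    dst += dst_stride;
    tmp += tmp_stride;
  }
}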
@@ -654,8 +654,6 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
       uint8_t *dst = use_tmp_dst_buf ?
           &final_buf[plane][(i * 8) >> pd->subsampling_x] :
           &pd->dst.buf[(i * 8) >> pd->subsampling_x];
-      int bmc_stride = pd->dst.stride;
-      uint8_t *bmc = &pd->dst.buf[(i * 8) >> pd->subsampling_x];
       int tmp_stride = tmp_stride1[plane];
       uint8_t *tmp = &tmp_buf1[plane][(i * 8) >> pd->subsampling_x];
       const uint8_t *mask[2];
@@ -665,27 +663,22 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
 #if CONFIG_VP9_HIGHBITDEPTH
       if (is_hbd) {
         uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
-        uint16_t *bmc16 = CONVERT_TO_SHORTPTR(bmc);
         uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
         for (row = 0; row < bh; ++row) {
-          for (col = 0; col < bw; ++col) {
-            dst16[col] = (mask[0][row] * bmc16[col] + mask[1][row] * tmp16[col]
+          for (col = 0; col < bw; ++col)
+            dst16[col] = (mask[0][row] * dst16[col] + mask[1][row] * tmp16[col]
                           + 32) >> 6;
-          }
           dst16 += dst_stride;
-          bmc16 += bmc_stride;
           tmp16 += tmp_stride;
         }
       } else {
 #endif  // CONFIG_VP9_HIGHBITDEPTH
       for (row = 0; row < bh; ++row) {
-        for (col = 0; col < bw; ++col) {
-          dst[col] = (mask[0][row] * bmc[col] + mask[1][row] * tmp[col] + 32)
+        for (col = 0; col < bw; ++col)
+          dst[col] = (mask[0][row] * dst[col] + mask[1][row] * tmp[col] + 32)
                      >> 6;
-        }
         dst += dst_stride;
-        bmc += bmc_stride;
         tmp += tmp_stride;
       }
 #if CONFIG_VP9_HIGHBITDEPTH
@@ -727,8 +720,6 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
       uint8_t *dst = use_tmp_dst_buf ?
          &final_buf[plane][(i * 8 * dst_stride) >> pd->subsampling_y] :
          &pd->dst.buf[(i * 8 * dst_stride) >> pd->subsampling_y];
-      int bmc_stride = pd->dst.stride;
-      uint8_t *bmc = &pd->dst.buf[(i * 8 * bmc_stride) >> pd->subsampling_y];
       int tmp_stride = tmp_stride2[plane];
       uint8_t *tmp = &tmp_buf2[plane]
                          [(i * 8 * tmp_stride) >> pd->subsampling_y];
@@ -739,27 +730,22 @@ void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
 #if CONFIG_VP9_HIGHBITDEPTH
       if (is_hbd) {
         uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
-        uint16_t *bmc16 = CONVERT_TO_SHORTPTR(bmc);
         uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
         for (row = 0; row < bh; ++row) {
-          for (col = 0; col < bw; ++col) {
-            dst16[col] = (mask[0][row] * bmc16[col] + mask[1][row] * tmp16[col]
+          for (col = 0; col < bw; ++col)
+            dst16[col] = (mask[0][row] * dst16[col] + mask[1][row] * tmp16[col]
                           + 32) >> 6;
-          }
           dst16 += dst_stride;
-          bmc16 += bmc_stride;
           tmp16 += tmp_stride;
         }
       } else {
 #endif  // CONFIG_VP9_HIGHBITDEPTH
       for (row = 0; row < bh; ++row) {
-        for (col = 0; col < bw; ++col) {
-          dst[col] = (mask[0][col] * bmc[col] + mask[1][col] * tmp[col] + 32)
+        for (col = 0; col < bw; ++col)
+          dst[col] = (mask[0][col] * dst[col] + mask[1][col] * tmp[col] + 32)
                      >> 6;
-        }
         dst += dst_stride;
-        bmc += bmc_stride;
         tmp += tmp_stride;
       }
 #if CONFIG_VP9_HIGHBITDEPTH
@@ -5089,6 +5089,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #if CONFIG_OBMC
+  int allow_obmc = is_obmc_allowed(mbmi);
+  int best_obmc_flag = 0;
 #if CONFIG_VP9_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, tmp_buf1_16[MAX_MB_PLANE * 64 * 64]);
   uint8_t *tmp_buf1;
@@ -5098,13 +5100,11 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
   uint8_t *obmc_tmp_buf[3] = {tmp_buf1, tmp_buf1 + 4096, tmp_buf1 + 8192};
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   int obmc_tmp_stride[3] = {64, 64, 64};
-  int best_obmc_flag = 0;
   uint8_t tmp_skip_txfm[MAX_MB_PLANE << 2] = {0};
   int64_t tmp_bsse[MAX_MB_PLANE << 2] = {0};
   int64_t rdobmc;
   int skip_txfm_sb_obmc = 0;
   int64_t skip_sse_sb_obmc = INT64_MAX;
-  int allow_obmc = is_obmc_allowed(mbmi);
 #endif  // CONFIG_OBMC
   int pred_exists = 0;
   int intpel_mv;
@@ -5587,6 +5587,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
 #if CONFIG_OBMC
         int tmp_rate_obmc;
         int64_t tmp_dist_obmc;
+        restore_dst_buf(xd, orig_dst, orig_dst_stride);
 #endif  // CONFIG_OBMC
         // Handles the special case when a filter that is not in the
         // switchable list (ex. bilinear) is indicated at the frame level, or
@@ -5594,19 +5595,14 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
         vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
 #if CONFIG_OBMC
         if (mbmi->obmc) {
-          vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, 1,
-                                           obmc_tmp_buf, obmc_tmp_stride,
+          vp10_build_obmc_inter_prediction(cm, xd, mi_row, mi_col, 0,
+                                           NULL, NULL,
                                            dst_buf1, dst_stride1,
                                            dst_buf2, dst_stride2);
-          for (i = 0; i < MAX_MB_PLANE; ++i) {
-            xd->plane[i].dst.buf = obmc_tmp_buf[i];
-            xd->plane[i].dst.stride = obmc_tmp_stride[i];
-          }
           model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
                           &skip_txfm_sb, &skip_sse_sb);
           rd = RDCOST(x->rdmult, x->rddiv,
-                      rs + tmp_rate + cpi->obmc_cost[bsize][1],
-                      tmp_dist);
+                      rs + tmp_rate + cpi->obmc_cost[bsize][1], tmp_dist);
         } else {
 #endif  // CONFIG_OBMC
           model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist,
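For context on the encoder-side hunks above: the fix routes the OBMC RD check through the actual destination buffers instead of the one-sided temporaries, which is why xd's per-plane dst is first pointed back at the original buffers via restore_dst_buf. A hedged sketch of what such a restore step does (struct and names are simplified stand-ins; the real vp10 helper operates on MACROBLOCKD):

#include <stdint.h>

#define MAX_MB_PLANE 3

/* Simplified stand-in for the per-plane dst bookkeeping in MACROBLOCKD. */
struct plane_dst { uint8_t *buf; int stride; };

/* Sketch of the restore step: point every plane's dst back at the
 * original buffers so the subsequent vp10_build_inter_predictors_sb and
 * vp10_build_obmc_inter_prediction calls write and blend in place. */
static void restore_dst_buf_sketch(struct plane_dst plane[MAX_MB_PLANE],
                                   uint8_t *orig_dst[MAX_MB_PLANE],
                                   const int orig_dst_stride[MAX_MB_PLANE]) {
  int i;
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    plane[i].buf = orig_dst[i];
    plane[i].stride = orig_dst_stride[i];
  }
}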