Commit d1cad9c3 authored by Yue Chen

Overlapped block motion compensation experiment

In this experiment, an obmc inter prediction mode is enabled for
inter blocks of size 8x8 and above. When the obmc flag is on, the
regular block-based motion compensation is refined using the
predictors of the above and left neighboring blocks.
Also fixed compatibility issues with vp9_highbitdepth, supertx,
ref_mv, and ext_interp.
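
For intuition, the refinement is a per-pixel weighted average of the
regular prediction and the prediction that a neighbor's motion
parameters generate over the overlap region. A minimal sketch of the
blend for one row of the top overlap (illustrative helper, not part of
this change; the real loops live in vp10_build_obmc_inter_prediction):

  #include <stdint.h>

  // Every pixel in a given overlap row shares one pair of complementary
  // 6-bit weights (w0 + w1 == 64). bmc is the regular block-based
  // prediction; tmp is the above neighbor's prediction extended into
  // this block. "+ 32 >> 6" rounds the fixed-point average to nearest.
  static void obmc_blend_row(uint8_t *dst, const uint8_t *bmc,
                             const uint8_t *tmp, int w, int w0, int w1) {
    int c;
    for (c = 0; c < w; ++c)
      dst[c] = (uint8_t)((w0 * bmc[c] + w1 * tmp[c] + 32) >> 6);
  }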

Coding gain (%) on derflr/hevcmr/hevchd
OBMC:
1.047/1.022/0.708
OBMC + SUPERTX:
1.652/1.616/1.137
SUPERTX:
0.862/0.779/0.630

Change-Id: I5d8d3c4729c6d3ccb03ec7034563107893103b7f
parent a45d5d3f
@@ -167,6 +167,10 @@ typedef struct {
INTRA_FILTER intra_filter;
#endif // CONFIG_EXT_INTRA
#if CONFIG_OBMC
int8_t obmc;
#endif // CONFIG_OBMC
int_mv mv[2];
int_mv pred_mv[2];
#if CONFIG_REF_MV
@@ -192,6 +196,12 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[1] > INTRA_FRAME;
}
#if CONFIG_OBMC
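// OBMC may only be signalled for blocks of at least 8x8 luma samples.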
static INLINE int is_obmc_allowed(const MB_MODE_INFO *mbmi) {
return (mbmi->sb_type >= BLOCK_8X8);
}
#endif // CONFIG_OBMC
PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b);
@@ -228,6 +228,12 @@ static const vpx_prob default_inter_compound_mode_probs
};
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
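// Per-block-size probabilities for the OBMC flag. The first three entries
// (the sub-8x8 sizes) are unused placeholders, since OBMC is never
// signalled below 8x8 (see is_obmc_allowed).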
static const vpx_prob default_obmc_prob[BLOCK_SIZES] = {
255, 255, 255, 151, 153, 144, 178, 165, 160, 207, 195, 168, 244,
};
#endif // CONFIG_OBMC
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
-DC_PRED, 2, /* 0 = DC_NODE */
@@ -1303,6 +1309,9 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
#endif // CONFIG_EXT_INTER
#endif // CONFIG_REF_MV
vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
#if CONFIG_OBMC
vp10_copy(fc->obmc_prob, default_obmc_prob);
#endif // CONFIG_OBMC
#if CONFIG_EXT_INTER
vp10_copy(fc->inter_compound_mode_probs, default_inter_compound_mode_probs);
#endif // CONFIG_EXT_INTER
@@ -1383,6 +1392,12 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
counts->inter_mode[i], fc->inter_mode_probs[i]);
#endif
#if CONFIG_OBMC
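// Adapt only the sizes for which OBMC can be signalled (8x8 and up).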
for (i = BLOCK_8X8; i < BLOCK_SIZES; ++i)
fc->obmc_prob[i] = mode_mv_merge_probs(pre_fc->obmc_prob[i],
counts->obmc[i]);
#endif // CONFIG_OBMC
#if CONFIG_SUPERTX
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
int j;
@@ -81,6 +81,9 @@ typedef struct frame_contexts {
vpx_prob inter_compound_mode_probs[INTER_MODE_CONTEXTS]
[INTER_COMPOUND_MODES - 1];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
vpx_prob obmc_prob[BLOCK_SIZES];
#endif // CONFIG_OBMC
vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS];
vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS];
vpx_prob single_ref_prob[REF_CONTEXTS][SINGLE_REFS-1];
@@ -135,6 +138,9 @@ typedef struct FRAME_COUNTS {
#if CONFIG_EXT_INTER
unsigned int inter_compound_mode[INTER_MODE_CONTEXTS][INTER_COMPOUND_MODES];
#endif // CONFIG_EXT_INTER
#if CONFIG_OBMC
unsigned int obmc[BLOCK_SIZES][2];
#endif // CONFIG_OBMC
unsigned int intra_inter[INTRA_INTER_CONTEXTS][2];
unsigned int comp_inter[COMP_INTER_CONTEXTS][2];
unsigned int single_ref[REF_CONTEXTS][SINGLE_REFS-1][2];
@@ -18,6 +18,9 @@
#include "vp10/common/blockd.h"
#include "vp10/common/reconinter.h"
#include "vp10/common/reconintra.h"
#if CONFIG_OBMC
#include "vp10/common/onyxc_int.h"
#endif // CONFIG_OBMC
#if CONFIG_VP9_HIGHBITDEPTH
void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
@@ -64,12 +67,20 @@ void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
sf, w, h, ref, interp_filter, sf->x_step_q4, sf->y_step_q4);
}
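// With CONFIG_OBMC, (mi_col_offset, mi_row_offset) select the mode info
// that drives the prediction: (0, 0) is the current block itself, while a
// nonzero offset picks a neighboring block so that its motion parameters
// can be applied to the current block when building the OBMC predictors.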
void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
void build_inter_predictors(MACROBLOCKD *xd, int plane,
#if CONFIG_OBMC
int mi_col_offset, int mi_row_offset,
#endif // CONFIG_OBMC
int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
#if CONFIG_OBMC
const MODE_INFO *mi = xd->mi[mi_col_offset + xd->mi_stride * mi_row_offset];
#else
const MODE_INFO *mi = xd->mi[0];
#endif // CONFIG_OBMC
const int is_compound = has_second_ref(&mi->mbmi);
const INTERP_FILTER interp_filter = mi->mbmi.interp_filter;
int ref;
@@ -201,10 +212,18 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
assert(pw * num_4x4_w == bw && ph * num_4x4_h == bh);
for (y = 0; y < num_4x4_h; ++y)
for (x = 0; x < num_4x4_w; ++x)
build_inter_predictors(xd, plane, y * 2 + x, bw, bh,
build_inter_predictors(xd, plane,
#if CONFIG_OBMC
0, 0,
#endif // CONFIG_OBMC
y * 2 + x, bw, bh,
4 * x, 4 * y, pw, ph, mi_x, mi_y);
} else {
build_inter_predictors(xd, plane, 0, bw, bh,
build_inter_predictors(xd, plane,
#if CONFIG_OBMC
0, 0,
#endif // CONFIG_OBMC
0, bw, bh,
0, 0, bw, bh, mi_x, mi_y);
}
}
@@ -309,7 +328,6 @@ static void generate_1dmask(int length, uint8_t *mask, int plane) {
}
}
void vp10_build_masked_inter_predictor_complex(
MACROBLOCKD *xd,
uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
@@ -483,9 +501,271 @@ void vp10_build_inter_predictors_sb_sub8x8(MACROBLOCKD *xd,
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
build_inter_predictors(xd, plane, block, bw, bh,
build_inter_predictors(xd, plane,
#if CONFIG_OBMC
0, 0,
#endif // CONFIG_OBMC
block, bw, bh,
0, 0, bw, bh,
mi_x, mi_y);
}
}
#endif // CONFIG_SUPERTX
#if CONFIG_OBMC
// obmc_mask_N[is_neighbor_predictor][overlap_position]
static const uint8_t obmc_mask_1[2][1] = {
{ 55},
{ 9}
};
static const uint8_t obmc_mask_2[2][2] = {
{ 45, 62},
{ 19, 2}
};
static const uint8_t obmc_mask_4[2][4] = {
{ 39, 50, 59, 64},
{ 25, 14, 5, 0}
};
static const uint8_t obmc_mask_8[2][8] = {
{ 36, 42, 48, 53, 57, 61, 63, 64},
{ 28, 22, 16, 11, 7, 3, 1, 0}
};
static const uint8_t obmc_mask_16[2][16] = {
{ 34, 37, 40, 43, 46, 49, 52, 54, 56, 58, 60, 61, 63, 64, 64, 64},
{ 30, 27, 24, 21, 18, 15, 12, 10, 8, 6, 4, 3, 1, 0, 0, 0}
};
static const uint8_t obmc_mask_32[2][32] = {
{ 33, 35, 36, 38, 40, 41, 43, 44, 45, 47, 48, 50, 51, 52, 53, 55,
56, 57, 58, 59, 60, 60, 61, 62, 62, 63, 63, 64, 64, 64, 64, 64},
{ 31, 29, 28, 26, 24, 23, 21, 20, 19, 17, 16, 14, 13, 12, 11, 9,
8, 7, 6, 5, 4, 4, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0}
};
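// At every position the two rows of each mask are complementary
// (mask[0][i] + mask[1][i] == 64), so the blend below is a weighted
// average with 6-bit fixed-point weights and round-to-nearest rounding
// ("+ 32" then ">> 6").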
void setup_obmc_mask(int length, const uint8_t *mask[2]) {
switch (length) {
case 1:
mask[0] = obmc_mask_1[0];
mask[1] = obmc_mask_1[1];
break;
case 2:
mask[0] = obmc_mask_2[0];
mask[1] = obmc_mask_2[1];
break;
case 4:
mask[0] = obmc_mask_4[0];
mask[1] = obmc_mask_4[1];
break;
case 8:
mask[0] = obmc_mask_8[0];
mask[1] = obmc_mask_8[1];
break;
case 16:
mask[0] = obmc_mask_16[0];
mask[1] = obmc_mask_16[1];
break;
case 32:
mask[0] = obmc_mask_32[0];
mask[1] = obmc_mask_32[1];
break;
default:
mask[0] = obmc_mask_32[0];
mask[1] = obmc_mask_32[1];
assert(0);
break;
}
}
// This function combines the motion compensated predictions generated by
// the top/left neighboring blocks' inter predictors with the regular inter
// prediction. We assume the original prediction (bmc) is stored in
// xd->plane[].dst.buf.
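// tmp_buf1/tmp_stride1 hold the predictions generated from the above row's
// motion parameters and tmp_buf2/tmp_stride2 those from the left column;
// when use_tmp_dst_buf is set, the blended output goes to final_buf instead
// of being written in place.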
void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
MACROBLOCKD *xd, int mi_row, int mi_col,
int use_tmp_dst_buf,
uint8_t *final_buf[MAX_MB_PLANE],
int final_stride[MAX_MB_PLANE],
uint8_t *tmp_buf1[MAX_MB_PLANE],
int tmp_stride1[MAX_MB_PLANE],
uint8_t *tmp_buf2[MAX_MB_PLANE],
int tmp_stride2[MAX_MB_PLANE]) {
const TileInfo *const tile = &xd->tile;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int plane, i, mi_step;
#if CONFIG_VP9_HIGHBITDEPTH
int is_hbd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0;
#endif // CONFIG_VP9_HIGHBITDEPTH
if (use_tmp_dst_buf) {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
int bw = (xd->n8_w * 8) >> pd->subsampling_x;
int bh = (xd->n8_h * 8) >> pd->subsampling_y;
int row;
#if CONFIG_VP9_HIGHBITDEPTH
if (is_hbd) {
uint16_t *final_buf16 = CONVERT_TO_SHORTPTR(final_buf[plane]);
uint16_t *bmc_buf16 = CONVERT_TO_SHORTPTR(pd->dst.buf);
for (row = 0; row < bh; ++row)
memcpy(final_buf16 + row * final_stride[plane],
bmc_buf16 + row * pd->dst.stride, bw * sizeof(uint16_t));
} else {
#endif
for (row = 0; row < bh; ++row)
memcpy(final_buf[plane] + row * final_stride[plane],
pd->dst.buf + row * pd->dst.stride, bw);
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
}
}
// handle above row
for (i = 0; mi_row > 0 && i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col);
i += mi_step) {
int mi_row_offset = -1;
int mi_col_offset = i;
int overlap;
MODE_INFO *above_mi = xd->mi[mi_col_offset +
mi_row_offset * xd->mi_stride];
MB_MODE_INFO *above_mbmi = &above_mi->mbmi;
mi_step = VPXMIN(xd->n8_w,
num_8x8_blocks_wide_lookup[above_mbmi->sb_type]);
if (!is_inter_block(above_mbmi))
continue;
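// Vertical extent of the blend, in pixels: half the height of the current
// block, further capped by the neighbor's height unless the neighbor is
// coded as skip.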
overlap = (above_mbmi->skip) ?
num_4x4_blocks_high_lookup[bsize] << 1 :
VPXMIN(num_4x4_blocks_high_lookup[bsize],
num_4x4_blocks_high_lookup[above_mbmi->sb_type]) << 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
int bw = (mi_step * 8) >> pd->subsampling_x;
int bh = overlap >> pd->subsampling_y;
int row, col;
int dst_stride = use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
uint8_t *dst = use_tmp_dst_buf ?
&final_buf[plane][(i * 8) >> pd->subsampling_x] :
&pd->dst.buf[(i * 8) >> pd->subsampling_x];
int bmc_stride = pd->dst.stride;
uint8_t *bmc = &pd->dst.buf[(i * 8) >> pd->subsampling_x];
int tmp_stride = tmp_stride1[plane];
uint8_t *tmp = &tmp_buf1[plane][(i * 8) >> pd->subsampling_x];
const uint8_t *mask[2];
setup_obmc_mask(bh, mask);
#if CONFIG_VP9_HIGHBITDEPTH
if (is_hbd) {
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
uint16_t *bmc16 = CONVERT_TO_SHORTPTR(bmc);
uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
for (row = 0; row < bh; ++row) {
for (col = 0; col < bw; ++col) {
dst16[col] = (mask[0][row] * bmc16[col] + mask[1][row] * tmp16[col]
+ 32) >> 6;
}
dst16 += dst_stride;
bmc16 += bmc_stride;
tmp16 += tmp_stride;
}
} else {
#endif // CONFIG_VP9_HIGHBITDEPTH
for (row = 0; row < bh; ++row) {
for (col = 0; col < bw; ++col) {
dst[col] = (mask[0][row] * bmc[col] + mask[1][row] * tmp[col] + 32)
>> 6;
}
dst += dst_stride;
bmc += bmc_stride;
tmp += tmp_stride;
}
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
}
} // each mi in the above row
if (mi_col == 0 || (mi_col - 1 < tile->mi_col_start) ||
(mi_col - 1) >= tile->mi_col_end)
return;
// handle left column
for (i = 0; i < VPXMIN(xd->n8_h, cm->mi_rows - mi_row);
i += mi_step) {
int mi_row_offset = i;
int mi_col_offset = -1;
int overlap;
MODE_INFO *left_mi = xd->mi[mi_col_offset +
mi_row_offset * xd->mi_stride];
MB_MODE_INFO *left_mbmi = &left_mi->mbmi;
mi_step = VPXMIN(xd->n8_h,
num_8x8_blocks_high_lookup[left_mbmi->sb_type]);
if (!is_inter_block(left_mbmi))
continue;
overlap = (left_mbmi->skip) ?
num_4x4_blocks_wide_lookup[bsize] << 1 :
VPXMIN(num_4x4_blocks_wide_lookup[bsize],
num_4x4_blocks_wide_lookup[left_mbmi->sb_type]) << 1;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
int bw = overlap >> pd->subsampling_x;
int bh = (mi_step * 8) >> pd->subsampling_y;
int row, col;
int dst_stride = use_tmp_dst_buf ? final_stride[plane] : pd->dst.stride;
uint8_t *dst = use_tmp_dst_buf ?
&final_buf[plane][(i * 8 * dst_stride) >> pd->subsampling_y] :
&pd->dst.buf[(i * 8 * dst_stride) >> pd->subsampling_y];
int bmc_stride = pd->dst.stride;
uint8_t *bmc = &pd->dst.buf[(i * 8 * bmc_stride) >> pd->subsampling_y];
int tmp_stride = tmp_stride2[plane];
uint8_t *tmp = &tmp_buf2[plane]
[(i * 8 * tmp_stride) >> pd->subsampling_y];
const uint8_t *mask[2];
setup_obmc_mask(bw, mask);
#if CONFIG_VP9_HIGHBITDEPTH
if (is_hbd) {
uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
uint16_t *bmc16 = CONVERT_TO_SHORTPTR(bmc);
uint16_t *tmp16 = CONVERT_TO_SHORTPTR(tmp);
for (row = 0; row < bh; ++row) {
for (col = 0; col < bw; ++col) {
dst16[col] = (mask[0][col] * bmc16[col] + mask[1][col] * tmp16[col]
+ 32) >> 6;
}
dst16 += dst_stride;
bmc16 += bmc_stride;
tmp16 += tmp_stride;
}
} else {
#endif // CONFIG_VP9_HIGHBITDEPTH
for (row = 0; row < bh; ++row) {
for (col = 0; col < bw; ++col) {
dst[col] = (mask[0][col] * bmc[col] + mask[1][col] * tmp[col] + 32)
>> 6;
}
dst += dst_stride;
bmc += bmc_stride;
tmp += tmp_stride;
}
#if CONFIG_VP9_HIGHBITDEPTH
}
#endif // CONFIG_VP9_HIGHBITDEPTH
}
} // each mi in the left column
}
#endif // CONFIG_OBMC
@@ -177,7 +177,11 @@ static INLINE MV average_split_mvs(const struct macroblockd_plane *pd,
return res;
}
void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
void build_inter_predictors(MACROBLOCKD *xd, int plane,
#if CONFIG_OBMC
int mi_col_offset, int mi_row_offset,
#endif // CONFIG_OBMC
int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y);
@@ -352,6 +356,19 @@ static INLINE int vp10_is_interp_needed(const MACROBLOCKD *const xd) {
return !intpel_mv;
}
#endif // CONFIG_EXT_INTERP
#if CONFIG_OBMC
void vp10_build_obmc_inter_prediction(VP10_COMMON *cm,
MACROBLOCKD *xd, int mi_row, int mi_col,
int use_tmp_dst_buf,
uint8_t *final_buf[MAX_MB_PLANE],
int final_stride[MAX_MB_PLANE],
uint8_t *tmp_buf1[MAX_MB_PLANE],
int tmp_stride1[MAX_MB_PLANE],
uint8_t *tmp_buf2[MAX_MB_PLANE],
int tmp_stride2[MAX_MB_PLANE]);
#endif // CONFIG_OBMC
#ifdef __cplusplus
} // extern "C"
#endif
@@ -657,7 +657,7 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
buf_stride, subpel_x, subpel_y;
uint8_t *ref_frame, *buf_ptr;
#if CONFIG_EXT_INTERP
const int i_filter = IsInterpolatingFilter(xd->mi[0]->mbmi.interp_filter);
const int i_filter = IsInterpolatingFilter(interp_filter);
#endif // CONFIG_EXT_INTERP
// Get reference frame pointer, width and height.
@@ -699,6 +699,11 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
xs = sf->x_step_q4;
ys = sf->y_step_q4;
} else {
#if CONFIG_OBMC
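// The MV handed to this function may belong to a neighboring block when
// building OBMC predictors, so clamp it against the current block's UMV
// border; clamp_mv_to_umv_border_sb also folds in the per-plane
// subsampling scaling, so its result is used directly as the scaled MV
// below.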
const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,
pd->subsampling_x,
pd->subsampling_y);
#endif // CONFIG_OBMC
// Co-ordinate of containing block to pixel precision.
x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
@@ -707,8 +712,13 @@ static void dec_build_inter_predictors_sb_sub8x8(VP10Decoder *const pbi,
x0_16 = x0 << SUBPEL_BITS;
y0_16 = y0 << SUBPEL_BITS;
#if CONFIG_OBMC
scaled_mv.row = mv_q4.row;
scaled_mv.col = mv_q4.col;
#else
scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));
#endif // CONFIG_OBMC
xs = ys = 16;
}
subpel_x = scaled_mv.col & SUBPEL_MASK;
@@ -871,6 +881,7 @@ static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
}
}
}
#if CONFIG_SUPERTX
static void dec_build_inter_predictors_sb_sub8x8(VP10Decoder *const pbi,
MACROBLOCKD *xd,
@@ -914,7 +925,211 @@ static void dec_build_inter_predictors_sb_sub8x8(VP10Decoder *const pbi,
}
}
}
#endif
#endif // CONFIG_SUPERTX
#if CONFIG_OBMC
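// Build, into tmp_buf, the predictions that the above-row neighbors'
// motion parameters generate for the current block, for later blending in
// vp10_build_obmc_inter_prediction.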
static void dec_build_prediction_by_above_preds(VP10Decoder *const pbi,
MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_stride[MAX_MB_PLANE]) {
VP10_COMMON *const cm = &pbi->common;
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
int i, j, mi_step, ref;
if (mi_row == 0)
return;
for (i = 0; i < VPXMIN(xd->n8_w, cm->mi_cols - mi_col); i += mi_step) {
int mi_row_offset = -1;
int mi_col_offset = i;
int mi_x, mi_y, bw, bh;
const MODE_INFO *mi = xd->mi[mi_col_offset + mi_row_offset * cm->mi_stride];
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE sb_type = mbmi->sb_type;
const int is_compound = has_second_ref(mbmi);
const INTERP_FILTER interp_filter = mbmi->interp_filter;
mi_step = VPXMIN(xd->n8_w, num_8x8_blocks_wide_lookup[sb_type]);
if (!is_inter_block(mbmi))
continue;
for (j = 0; j < MAX_MB_PLANE; ++j) {
struct macroblockd_plane *const pd = &xd->plane[j];
setup_pred_plane(&pd->dst,
tmp_buf[j], tmp_stride[j],
0, i, NULL,
pd->subsampling_x, pd->subsampling_y);
}
for (ref = 0; ref < 1 + is_compound; ++ref) {
MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!vp10_is_valid_scale(&ref_buf->sf)))
vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col + i,
&ref_buf->sf);
}
xd->mb_to_left_edge = -(((mi_col + i) * MI_SIZE) * 8);
mi_x = (mi_col + i) << MI_SIZE_LOG2;
mi_y = mi_row << MI_SIZE_LOG2;
for (j = 0; j < MAX_MB_PLANE; ++j) {
struct macroblockd_plane *pd = &xd->plane[j];
struct buf_2d *const dst_buf = &pd->dst;
bw = (mi_step * 8) >> pd->subsampling_x;
bh = VPXMAX((num_4x4_blocks_high_lookup[bsize] * 2) >> pd->subsampling_y,
4);
for (ref = 0; ref < 1 + is_compound; ++ref) {
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
struct buf_2d *const pre_buf = &pd->pre[ref];
const int idx = xd->block_refs[ref]->idx;
BufferPool *const pool = pbi->common.buffer_pool;
RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
const int is_scaled = vp10_is_scaled(sf);
if (sb_type < BLOCK_8X8) {
const PARTITION_TYPE bp = BLOCK_8X8 - sb_type;
const int have_vsplit = bp != PARTITION_HORZ;
const int have_hsplit = bp != PARTITION_VERT;
const int num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x);
const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y);
const int pw = 8 >> (have_vsplit | pd->subsampling_x);
int x, y;
for (y = 0; y < num_4x4_h; ++y)
for (x = 0; x < num_4x4_w; ++x) {
const MV mv = average_split_mvs(pd, mi, ref, y * 2 + x);
if ((bp == PARTITION_HORZ || bp == PARTITION_SPLIT)
&& y == 0 && !pd->subsampling_y)
continue;
dec_build_inter_predictors(pbi, xd, j, bw, bh,
4 * x, 0, pw, bh, mi_x, mi_y,
interp_filter, sf, pre_buf, dst_buf,
&mv, ref_frame_buf, is_scaled, ref);
}
} else {
const MV mv = mi->mbmi.mv[ref].as_mv;
dec_build_inter_predictors(pbi, xd, j, bw, bh,
0, 0, bw, bh, mi_x, mi_y, interp_filter,
sf, pre_buf, dst_buf, &mv, ref_frame_buf,
is_scaled, ref);
}
}
}
}
xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
}
static void dec_build_prediction_by_left_preds(VP10Decoder *const pbi,
MACROBLOCKD *xd,
int mi_row, int mi_col,