Commit 761b1ac8 authored by David Barker, committed by Debargha Mukherjee

ext-partition: Don't read not-yet-decoded values

When deciding whether the top-right or bottom-left blocks are
available, we currently always act as if we're using 128x128
superblocks. This means that, when using 64x64 superblocks,
we sometimes conclude that blocks are available when they haven't
been decoded yet!

This typically happens at, for example, mi_row=15, mi_col=16
(for the bottom-left check), where we're at a 64x64 boundary but
not a 128x128 boundary (in 4x4 mi units, mi_col=16 is pixel
column 64).

This patch fixes the issue by checking based on the signalled
superblock size.

Note: Most of this patch is just threading 'cm' through the
intra prediction process, so that we have access to cm->sb_size
in has_top_right() and has_bottom_left().
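
As an illustrative sketch (not part of the patch; the two helper names
below are made up, while mi_size_wide[], cm->sb_size and MAX_MIB_MASK
are the real identifiers), the core of the change is how a block's
offset within its superblock is computed:

static int mi_offset_in_sb_old(int mi_coord) {
  // Old behaviour: the mask always assumes 128x128 superblocks.
  return mi_coord & MAX_MIB_MASK;
}

static int mi_offset_in_sb_new(const AV1_COMMON *cm, int mi_coord) {
  // New behaviour: derive the mask from the signalled superblock size,
  // e.g. sb_mi_size is 16 for 64x64 superblocks and 32 for 128x128 ones.
  const int sb_mi_size = mi_size_wide[cm->sb_size];
  return mi_coord & (sb_mi_size - 1);
}

The same (sb_mi_size - 1) mask replaces MAX_MIB_MASK in
has_top_right(), has_bottom_left() and check_sb_border() in the diff
below.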

Change-Id: I126964c510aafffc870e7cd8b3e64a46abb14b3a
parent f58f111f
......@@ -334,10 +334,11 @@ static uint8_t scan_blk_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
return newmv_count;
}
static int has_top_right(const MACROBLOCKD *xd, int mi_row, int mi_col,
int bs) {
const int mask_row = mi_row & MAX_MIB_MASK;
const int mask_col = mi_col & MAX_MIB_MASK;
static int has_top_right(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int mi_row, int mi_col, int bs) {
const int sb_mi_size = mi_size_wide[cm->sb_size];
const int mask_row = mi_row & (sb_mi_size - 1);
const int mask_col = mi_col & (sb_mi_size - 1);
// In a split partition all apart from the bottom right has a top right
int has_tr = !((mask_row & bs) && (mask_col & bs));
......@@ -348,7 +349,7 @@ static int has_top_right(const MACROBLOCKD *xd, int mi_row, int mi_col,
// For each 4x4 group of blocks, when the bottom right is decoded the blocks
// to the right have not been decoded therefore the bottom right does
// not have a top right
while (bs < MAX_MIB_SIZE) {
while (bs < sb_mi_size) {
if (mask_col & bs) {
if ((mask_col & (2 * bs)) && (mask_row & (2 * bs))) {
has_tr = 0;
......@@ -381,13 +382,15 @@ static int has_top_right(const MACROBLOCKD *xd, int mi_row, int mi_col,
}
#if CONFIG_MFMV
static int check_sb_border(const int mi_row, const int mi_col,
const int row_offset, const int col_offset) {
const int row = mi_row & MAX_MIB_MASK;
const int col = mi_col & MAX_MIB_MASK;
if (row + row_offset < 0 || row + row_offset >= MAX_MIB_SIZE ||
col + col_offset < 0 || col + col_offset >= MAX_MIB_SIZE)
static int check_sb_border(const AV1_COMMON *cm, const int mi_row,
const int mi_col, const int row_offset,
const int col_offset) {
const int sb_mi_size = mi_size_wide[cm->sb_size];
const int row = mi_row & (sb_mi_size - 1);
const int col = mi_col & (sb_mi_size - 1);
if (row + row_offset < 0 || row + row_offset >= sb_mi_size ||
col + col_offset < 0 || col + col_offset >= sb_mi_size)
return 0;
return 1;
......@@ -581,7 +584,7 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
#endif
const int bs = AOMMAX(xd->n8_w, xd->n8_h);
const int has_tr = has_top_right(xd, mi_row, mi_col, bs);
const int has_tr = has_top_right(cm, xd, mi_row, mi_col, bs);
MV_REFERENCE_FRAME rf[2];
const TileInfo *const tile = &xd->tile;
......@@ -661,7 +664,7 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
blk_row = tpl_sample_pos[i][0];
blk_col = tpl_sample_pos[i][1];
if (!check_sb_border(mi_row, mi_col, blk_row, blk_col)) continue;
if (!check_sb_border(cm, mi_row, mi_col, blk_row, blk_col)) continue;
coll_blk_count += add_tpl_ref_mv(cm, prev_frame_mvs_base, xd, mi_row,
mi_col, ref_frame, blk_row, blk_col,
......@@ -838,6 +841,7 @@ static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
void *const data, int16_t *mode_context,
int_mv zeromv) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
const int sb_mi_size = mi_size_wide[cm->sb_size];
int i, refmv_count = 0;
int different_ref_found = 0;
int context_counter = 0;
......@@ -943,8 +947,8 @@ static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
? NULL
: &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
if (candidate == NULL) continue;
if ((mi_row % MAX_MIB_SIZE) + mv_ref->row >= MAX_MIB_SIZE ||
(mi_col % MAX_MIB_SIZE) + mv_ref->col >= MAX_MIB_SIZE)
if ((mi_row & (sb_mi_size - 1)) + mv_ref->row >= sb_mi_size ||
(mi_col & (sb_mi_size - 1)) + mv_ref->col >= sb_mi_size)
continue;
different_ref_found = 1;
......@@ -995,8 +999,8 @@ static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
? NULL
: &xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
if (candidate == NULL) continue;
if ((mi_row % MAX_MIB_SIZE) + mv_ref->row >= MAX_MIB_SIZE ||
(mi_col % MAX_MIB_SIZE) + mv_ref->col >= MAX_MIB_SIZE)
if ((mi_row & (sb_mi_size - 1)) + mv_ref->row >= sb_mi_size ||
(mi_col & (sb_mi_size - 1)) + mv_ref->col >= sb_mi_size)
continue;
// If the candidate is INTRA we don't want to consider its mv.
......@@ -1989,7 +1993,8 @@ int findSamples(const AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col,
assert(2 * np <= SAMPLES_ARRAY_SIZE);
// Top-right block
if (do_tr && has_top_right(xd, mi_row, mi_col, AOMMAX(xd->n8_w, xd->n8_h))) {
if (do_tr &&
has_top_right(cm, xd, mi_row, mi_col, AOMMAX(xd->n8_w, xd->n8_h))) {
POSITION trb_pos = { -1, xd->n8_w };
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, cm, &trb_pos)) {
......
......@@ -1709,7 +1709,7 @@ void av1_build_inter_predictors_sby(const AV1_COMMON *cm, MACROBLOCKD *xd,
BUFFER_SET default_ctx = { { xd->plane[0].dst.buf, NULL, NULL },
{ xd->plane[0].dst.stride, 0, 0 } };
if (!ctx) ctx = &default_ctx;
av1_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf,
av1_build_interintra_predictors_sby(cm, xd, xd->plane[0].dst.buf,
xd->plane[0].dst.stride, ctx, bsize);
}
#else
......@@ -1730,8 +1730,8 @@ void av1_build_inter_predictors_sbuv(const AV1_COMMON *cm, MACROBLOCKD *xd,
};
if (!ctx) ctx = &default_ctx;
av1_build_interintra_predictors_sbuv(
xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf, xd->plane[1].dst.stride,
xd->plane[2].dst.stride, ctx, bsize);
cm, xd, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
xd->plane[1].dst.stride, xd->plane[2].dst.stride, ctx, bsize);
}
#else
(void)ctx;
......@@ -3115,7 +3115,8 @@ static void combine_interintra_highbd(
}
#endif // CONFIG_HIGHBITDEPTH
void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
void av1_build_intra_predictors_for_interintra(const AV1_COMMON *cm,
MACROBLOCKD *xd,
BLOCK_SIZE bsize, int plane,
BUFFER_SET *ctx, uint8_t *dst,
int dst_stride) {
......@@ -3124,7 +3125,7 @@ void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
PREDICTION_MODE mode =
interintra_to_intra_mode[xd->mi[0]->mbmi.interintra_mode];
av1_predict_intra_block(xd, pd->width, pd->height, plane_bsize, mode,
av1_predict_intra_block(cm, xd, pd->width, pd->height, plane_bsize, mode,
ctx->plane[plane], ctx->stride[plane], dst,
dst_stride, 0, 0, plane);
}
......@@ -3152,14 +3153,14 @@ void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
inter_pred, inter_stride, intra_pred, intra_stride);
}
void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
int ystride, BUFFER_SET *ctx,
BLOCK_SIZE bsize) {
void av1_build_interintra_predictors_sby(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *ypred, int ystride,
BUFFER_SET *ctx, BLOCK_SIZE bsize) {
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, intrapredictor[MAX_SB_SQUARE]);
av1_build_intra_predictors_for_interintra(
xd, bsize, 0, ctx, CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
cm, xd, bsize, 0, ctx, CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
av1_combine_interintra(xd, bsize, 0, ypred, ystride,
CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE);
return;
......@@ -3167,21 +3168,22 @@ void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
#endif // CONFIG_HIGHBITDEPTH
{
DECLARE_ALIGNED(16, uint8_t, intrapredictor[MAX_SB_SQUARE]);
av1_build_intra_predictors_for_interintra(xd, bsize, 0, ctx, intrapredictor,
MAX_SB_SIZE);
av1_build_intra_predictors_for_interintra(cm, xd, bsize, 0, ctx,
intrapredictor, MAX_SB_SIZE);
av1_combine_interintra(xd, bsize, 0, ypred, ystride, intrapredictor,
MAX_SB_SIZE);
}
}
void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
int ustride, BUFFER_SET *ctx,
int plane, BLOCK_SIZE bsize) {
void av1_build_interintra_predictors_sbc(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *upred, int ustride,
BUFFER_SET *ctx, int plane,
BLOCK_SIZE bsize) {
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
DECLARE_ALIGNED(16, uint16_t, uintrapredictor[MAX_SB_SQUARE]);
av1_build_intra_predictors_for_interintra(
xd, bsize, plane, ctx, CONVERT_TO_BYTEPTR(uintrapredictor),
cm, xd, bsize, plane, ctx, CONVERT_TO_BYTEPTR(uintrapredictor),
MAX_SB_SIZE);
av1_combine_interintra(xd, bsize, plane, upred, ustride,
CONVERT_TO_BYTEPTR(uintrapredictor), MAX_SB_SIZE);
......@@ -3190,28 +3192,29 @@ void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
#endif // CONFIG_HIGHBITDEPTH
{
DECLARE_ALIGNED(16, uint8_t, uintrapredictor[MAX_SB_SQUARE]);
av1_build_intra_predictors_for_interintra(xd, bsize, plane, ctx,
av1_build_intra_predictors_for_interintra(cm, xd, bsize, plane, ctx,
uintrapredictor, MAX_SB_SIZE);
av1_combine_interintra(xd, bsize, plane, upred, ustride, uintrapredictor,
MAX_SB_SIZE);
}
}
void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
uint8_t *vpred, int ustride,
int vstride, BUFFER_SET *ctx,
BLOCK_SIZE bsize) {
av1_build_interintra_predictors_sbc(xd, upred, ustride, ctx, 1, bsize);
av1_build_interintra_predictors_sbc(xd, vpred, vstride, ctx, 2, bsize);
void av1_build_interintra_predictors_sbuv(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *upred, uint8_t *vpred,
int ustride, int vstride,
BUFFER_SET *ctx, BLOCK_SIZE bsize) {
av1_build_interintra_predictors_sbc(cm, xd, upred, ustride, ctx, 1, bsize);
av1_build_interintra_predictors_sbc(cm, xd, vpred, vstride, ctx, 2, bsize);
}
void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
uint8_t *upred, uint8_t *vpred,
int ystride, int ustride, int vstride,
BUFFER_SET *ctx, BLOCK_SIZE bsize) {
av1_build_interintra_predictors_sby(xd, ypred, ystride, ctx, bsize);
av1_build_interintra_predictors_sbuv(xd, upred, vpred, ustride, vstride, ctx,
bsize);
void av1_build_interintra_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *ypred, uint8_t *upred,
uint8_t *vpred, int ystride, int ustride,
int vstride, BUFFER_SET *ctx,
BLOCK_SIZE bsize) {
av1_build_interintra_predictors_sby(cm, xd, ypred, ystride, ctx, bsize);
av1_build_interintra_predictors_sbuv(cm, xd, upred, vpred, ustride, vstride,
ctx, bsize);
}
#endif // CONFIG_INTERINTRA
......
......@@ -673,26 +673,26 @@ const uint8_t *av1_get_compound_type_mask_inverse(
const uint8_t *av1_get_compound_type_mask(
const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type);
#if CONFIG_INTERINTRA
void av1_build_interintra_predictors(MACROBLOCKD *xd, uint8_t *ypred,
uint8_t *upred, uint8_t *vpred,
int ystride, int ustride, int vstride,
BUFFER_SET *ctx, BLOCK_SIZE bsize);
void av1_build_interintra_predictors_sby(MACROBLOCKD *xd, uint8_t *ypred,
int ystride, BUFFER_SET *ctx,
void av1_build_interintra_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *ypred, uint8_t *upred,
uint8_t *vpred, int ystride, int ustride,
int vstride, BUFFER_SET *ctx,
BLOCK_SIZE bsize);
void av1_build_interintra_predictors_sby(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *ypred, int ystride,
BUFFER_SET *ctx, BLOCK_SIZE bsize);
void av1_build_interintra_predictors_sbc(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *upred, int ustride,
BUFFER_SET *ctx, int plane,
BLOCK_SIZE bsize);
void av1_build_interintra_predictors_sbc(MACROBLOCKD *xd, uint8_t *upred,
int ustride, BUFFER_SET *ctx,
int plane, BLOCK_SIZE bsize);
void av1_build_interintra_predictors_sbuv(MACROBLOCKD *xd, uint8_t *upred,
uint8_t *vpred, int ustride,
int vstride, BUFFER_SET *ctx,
BLOCK_SIZE bsize);
void av1_build_intra_predictors_for_interintra(MACROBLOCKD *xd,
BLOCK_SIZE bsize, int plane,
BUFFER_SET *ctx,
uint8_t *intra_pred,
int intra_stride);
void av1_build_interintra_predictors_sbuv(const AV1_COMMON *cm, MACROBLOCKD *xd,
uint8_t *upred, uint8_t *vpred,
int ustride, int vstride,
BUFFER_SET *ctx, BLOCK_SIZE bsize);
void av1_build_intra_predictors_for_interintra(
const AV1_COMMON *cm, MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
BUFFER_SET *ctx, uint8_t *intra_pred, int intra_stride);
void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane,
const uint8_t *inter_pred, int inter_stride,
const uint8_t *intra_pred, int intra_stride);
......
......@@ -521,8 +521,8 @@ static const uint16_t *const orders_verta[BLOCK_SIZES] = {
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_EXT_PARTITION_TYPES
static int has_top_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
int top_available, int right_available,
static int has_top_right(const AV1_COMMON *cm, BLOCK_SIZE bsize, int mi_row,
int mi_col, int top_available, int right_available,
#if CONFIG_EXT_PARTITION_TYPES
PARTITION_TYPE partition,
#endif
......@@ -561,8 +561,9 @@ static int has_top_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
const int bw_in_mi_log2 = mi_width_log2_lookup[bsize];
const int bh_in_mi_log2 = mi_height_log2_lookup[bsize];
const int blk_row_in_sb = (mi_row & MAX_MIB_MASK) >> bh_in_mi_log2;
const int blk_col_in_sb = (mi_col & MAX_MIB_MASK) >> bw_in_mi_log2;
const int sb_mi_size = mi_size_high[cm->sb_size];
const int blk_row_in_sb = (mi_row & (sb_mi_size - 1)) >> bh_in_mi_log2;
const int blk_col_in_sb = (mi_col & (sb_mi_size - 1)) >> bw_in_mi_log2;
// Top row of superblock: so top-right pixels are in the top and/or
// top-right superblocks, both of which are already available.
......@@ -570,7 +571,7 @@ static int has_top_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
// Rightmost column of superblock (and not the top row): so top-right pixels
// fall in the right superblock, which is not available yet.
if (((blk_col_in_sb + 1) << bw_in_mi_log2) >= MAX_MIB_SIZE) return 0;
if (((blk_col_in_sb + 1) << bw_in_mi_log2) >= sb_mi_size) return 0;
// General case (neither top row nor rightmost column): check if the
// top-right block is coded before the current block.
......@@ -591,8 +592,8 @@ static int has_top_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
}
}
static int has_bottom_left(BLOCK_SIZE bsize, int mi_row, int mi_col,
int bottom_available, int left_available,
static int has_bottom_left(const AV1_COMMON *cm, BLOCK_SIZE bsize, int mi_row,
int mi_col, int bottom_available, int left_available,
TX_SIZE txsz, int row_off, int col_off, int ss_y) {
if (!bottom_available || !left_available) return 0;
......@@ -614,8 +615,9 @@ static int has_bottom_left(BLOCK_SIZE bsize, int mi_row, int mi_col,
const int bw_in_mi_log2 = mi_width_log2_lookup[bsize];
const int bh_in_mi_log2 = mi_height_log2_lookup[bsize];
const int blk_row_in_sb = (mi_row & MAX_MIB_MASK) >> bh_in_mi_log2;
const int blk_col_in_sb = (mi_col & MAX_MIB_MASK) >> bw_in_mi_log2;
const int sb_mi_size = mi_size_high[cm->sb_size];
const int blk_row_in_sb = (mi_row & (sb_mi_size - 1)) >> bh_in_mi_log2;
const int blk_col_in_sb = (mi_col & (sb_mi_size - 1)) >> bw_in_mi_log2;
// Leftmost column of superblock: so bottom-left pixels maybe in the left
// and/or bottom-left superblocks. But only the left superblock is
......@@ -627,13 +629,13 @@ static int has_bottom_left(BLOCK_SIZE bsize, int mi_row, int mi_col,
ss_y;
const int row_off_in_sb = blk_start_row_off + row_off;
const int sb_height_unit =
MAX_MIB_SIZE << (MI_SIZE_LOG2 - tx_size_wide_log2[0]) >> ss_y;
sb_mi_size << (MI_SIZE_LOG2 - tx_size_wide_log2[0]) >> ss_y;
return row_off_in_sb + bottom_left_count_unit < sb_height_unit;
}
// Bottom row of superblock (and not the leftmost column): so bottom-left
// pixels fall in the bottom superblock, which is not available yet.
if (((blk_row_in_sb + 1) << bh_in_mi_log2) >= MAX_MIB_SIZE) return 0;
if (((blk_row_in_sb + 1) << bh_in_mi_log2) >= sb_mi_size) return 0;
// General case (neither leftmost column nor bottom row): check if the
// bottom-left block is coded before the current block.
......@@ -2917,7 +2919,8 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
}
}
static void predict_intra_block_helper(const MACROBLOCKD *xd, int wpx, int hpx,
static void predict_intra_block_helper(const AV1_COMMON *cm,
const MACROBLOCKD *xd, int wpx, int hpx,
TX_SIZE tx_size, PREDICTION_MODE mode,
const uint8_t *ref, int ref_stride,
uint8_t *dst, int dst_stride,
......@@ -2974,13 +2977,13 @@ static void predict_intra_block_helper(const MACROBLOCKD *xd, int wpx, int hpx,
#endif
const int have_top_right =
has_top_right(bsize, mi_row, mi_col, have_top, right_available,
has_top_right(cm, bsize, mi_row, mi_col, have_top, right_available,
#if CONFIG_EXT_PARTITION_TYPES
partition,
#endif
tx_size, row_off, col_off, pd->subsampling_x);
const int have_bottom_left =
has_bottom_left(bsize, mi_row, mi_col, bottom_available, have_left,
has_bottom_left(cm, bsize, mi_row, mi_col, bottom_available, have_left,
tx_size, row_off, col_off, pd->subsampling_y);
if (xd->mi[0]->mbmi.palette_mode_info.palette_size[plane != 0] > 0) {
const int stride = wpx;
......@@ -3029,8 +3032,9 @@ static void predict_intra_block_helper(const MACROBLOCKD *xd, int wpx, int hpx,
have_bottom_left ? AOMMIN(txhpx, yd) : 0, plane);
}
void av1_predict_intra_block_facade(MACROBLOCKD *xd, int plane, int block_idx,
int blk_col, int blk_row, TX_SIZE tx_size) {
void av1_predict_intra_block_facade(const AV1_COMMON *cm, MACROBLOCKD *xd,
int plane, int block_idx, int blk_col,
int blk_row, TX_SIZE tx_size) {
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
struct macroblockd_plane *const pd = &xd->plane[plane];
......@@ -3058,9 +3062,9 @@ void av1_predict_intra_block_facade(MACROBLOCKD *xd, int plane, int block_idx,
}
#endif
av1_predict_intra_block(xd, pd->width, pd->height, txsize_to_bsize[tx_size],
mode, dst, dst_stride, dst, dst_stride, blk_col,
blk_row, plane);
av1_predict_intra_block(cm, xd, pd->width, pd->height,
txsize_to_bsize[tx_size], mode, dst, dst_stride, dst,
dst_stride, blk_col, blk_row, plane);
}
#if INTRA_USES_EXT_TRANSFORMS
......@@ -3168,11 +3172,11 @@ static void restore_ref_col(int buf_flags, int block_height,
}
#endif // #if INTRA_USES_EXT_TRANSFORMS
void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
BLOCK_SIZE bsize, PREDICTION_MODE mode,
const uint8_t *ref, int ref_stride, uint8_t *dst,
int dst_stride, int col_off, int row_off,
int plane) {
void av1_predict_intra_block(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int wpx, int hpx, BLOCK_SIZE bsize,
PREDICTION_MODE mode, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
int col_off, int row_off, int plane) {
const int block_width = block_size_wide[bsize];
const int block_height = block_size_high[bsize];
#if INTRA_USES_RECT_TRANSFORMS
......@@ -3186,8 +3190,8 @@ void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
// Start by running the helper to predict either the entire block
// (if the block is square or the same size as tx_size) or the top
// or left of the block if it's tall and thin or short and wide.
predict_intra_block_helper(xd, wpx, hpx, tx_size, mode, ref, ref_stride, dst,
dst_stride, col_off, row_off, plane);
predict_intra_block_helper(cm, xd, wpx, hpx, tx_size, mode, ref, ref_stride,
dst, dst_stride, col_off, row_off, plane);
// If we're not using extended transforms, this function should
// always be called with a square block.
......@@ -3252,7 +3256,7 @@ void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
const uint8_t *next_ref_row = ref + next_row_idx * ref_stride;
uint8_t *next_dst_row = dst + next_row_idx * dst_stride;
predict_intra_block_helper(xd, wpx, hpx, tx_size, mode, next_ref_row,
predict_intra_block_helper(cm, xd, wpx, hpx, tx_size, mode, next_ref_row,
ref_stride, next_dst_row, dst_stride, col_off,
next_row_off, plane);
......@@ -3289,7 +3293,7 @@ void av1_predict_intra_block(const MACROBLOCKD *xd, int wpx, int hpx,
const uint8_t *next_ref_col = ref + next_col_idx;
uint8_t *next_dst_col = dst + next_col_idx;
predict_intra_block_helper(xd, wpx, hpx, tx_size, mode, next_ref_col,
predict_intra_block_helper(cm, xd, wpx, hpx, tx_size, mode, next_ref_col,
ref_stride, next_dst_col, dst_stride,
next_col_off, row_off, plane);
......
......@@ -14,18 +14,21 @@
#include "aom/aom_integer.h"
#include "av1/common/blockd.h"
#include "av1/common/onyxc_int.h"
#ifdef __cplusplus
extern "C" {
#endif
void av1_init_intra_predictors(void);
void av1_predict_intra_block_facade(MACROBLOCKD *xd, int plane, int block_idx,
int blk_col, int blk_row, TX_SIZE tx_size);
void av1_predict_intra_block(const MACROBLOCKD *xd, int bw, int bh,
BLOCK_SIZE bsize, PREDICTION_MODE mode,
const uint8_t *ref, int ref_stride, uint8_t *dst,
int dst_stride, int aoff, int loff, int plane);
void av1_predict_intra_block_facade(const AV1_COMMON *cm, MACROBLOCKD *xd,
int plane, int block_idx, int blk_col,
int blk_row, TX_SIZE tx_size);
void av1_predict_intra_block(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int bw, int bh, BLOCK_SIZE bsize,
PREDICTION_MODE mode, const uint8_t *ref,
int ref_stride, uint8_t *dst, int dst_stride,
int aoff, int loff, int plane);
#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
// Mapping of interintra to intra mode for use in the intra component
......
......@@ -479,7 +479,7 @@ static void predict_and_reconstruct_intra_block(
#if CONFIG_PVQ
(void)r;
#endif
av1_predict_intra_block_facade(xd, plane, block_idx, col, row, tx_size);
av1_predict_intra_block_facade(cm, xd, plane, block_idx, col, row, tx_size);
if (!mbmi->skip) {
#if !CONFIG_PVQ
......
......@@ -1077,7 +1077,8 @@ void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
uint8_t *dst =
&pd->dst.buf[(blk_row * dst_stride + blk_col) << tx_size_wide_log2[0]];
av1_predict_intra_block_facade(xd, plane, block, blk_col, blk_row, tx_size);
av1_predict_intra_block_facade(cm, xd, plane, block, blk_col, blk_row,
tx_size);
av1_subtract_txb(x, plane, plane_bsize, blk_col, blk_row, tx_size);
......
......@@ -143,6 +143,7 @@ static int do_16x16_zerozero_search(AV1_COMP *cpi, int_mv *dst_mv) {
return err;
}
static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCK *const x = &cpi->td.mb;
MACROBLOCKD *const xd = &x->e_mbd;
PREDICTION_MODE best_mode = -1, mode;
......@@ -154,9 +155,10 @@ static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
unsigned int err;
xd->mi[0]->mbmi.mode = mode;
av1_predict_intra_block(xd, 16, 16, BLOCK_16X16, mode, x->plane[0].src.buf,
x->plane[0].src.stride, xd->plane[0].dst.buf,
xd->plane[0].dst.stride, 0, 0, 0);
av1_predict_intra_block(cm, xd, 16, 16, BLOCK_16X16, mode,
x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride, 0, 0,
0);
err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
xd->plane[0].dst.buf, xd->plane[0].dst.stride);
......
......@@ -1991,9 +1991,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
const AV1_COMP *cpi = args->cpi;
ENTROPY_CONTEXT *a = args->t_above + blk_col;
ENTROPY_CONTEXT *l = args->t_left + blk_row;
#if !CONFIG_TXK_SEL
const AV1_COMMON *cm = &cpi->common;
#endif
int64_t rd1, rd2, rd;
RD_STATS this_rd_stats;
......@@ -2019,7 +2017,8 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
if (args->exit_early) return;
if (!is_inter_block(mbmi)) {
av1_predict_intra_block_facade(xd, plane, block, blk_col, blk_row, tx_size);
av1_predict_intra_block_facade(cm, xd, plane, block, blk_col, blk_row,
tx_size);
av1_subtract_txb(x, plane, plane_bsize, blk_col, blk_row, tx_size);
}
......@@ -2893,6 +2892,7 @@ static int conditional_skipintra(PREDICTION_MODE mode,
// Model based RD estimation for luma intra blocks.
static int64_t intra_model_yrd(const AV1_COMP *const cpi, MACROBLOCK *const x,
BLOCK_SIZE bsize, int mode_cost) {
const AV1_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
assert(!is_inter_block(mbmi));
......@@ -2910,7 +2910,7 @@ static int64_t intra_model_yrd(const AV1_COMP *const cpi, MACROBLOCK *const x,
int block = 0;
for (row = 0; row < max_blocks_high; row += stepr) {
for (col = 0; col < max_blocks_wide; col += stepc) {
av1_predict_intra_block_facade(xd, 0, block, col, row, tx_size);
av1_predict_intra_block_facade(cm, xd, 0, block, col, row, tx_size);
block += step;
}
}
......@@ -3282,8 +3282,8 @@ static int64_t rd_pick_intra_sub_8x8_y_subblock_mode(
block == 0 || block == 2));
xd->mi[0]->bmi[block_raster_idx].as_mode = mode;
av1_predict_intra_block(
xd, pd->width, pd->height, txsize_to_bsize[tx_size], mode, dst,
dst_stride, dst, dst_stride, col + idx, row + idy, 0);
cm, xd, pd->width, pd->height, txsize_to_bsize[tx_size], mode,
dst, dst_stride, dst, dst_stride, col + idx, row + idy, 0);
#if !CONFIG_PVQ
aom_highbd_subtract_block(tx_height, tx_width, src_diff, 8, src,
src_stride, dst, dst_stride, xd->bd);
......@@ -3490,7 +3490,7 @@ static int64_t rd_pick_intra_sub_8x8_y_subblock_mode(
assert(IMPLIES(tx_size == TX_4X8 || tx_size == TX_8X4,
block == 0 || block == 2));
xd->mi[0]->bmi[block_raster_idx].as_mode = mode;
av1_predict_intra_block(xd, pd->width, pd->height,
av1_predict_intra_block(cm, xd, pd->width, pd->height,
txsize_to_bsize[tx_size], mode, dst, dst_stride,
dst, dst_stride,
#if CONFIG_CB4X4
......@@ -9013,7 +9013,6 @@ static int64_t handle_inter_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
HandleInterModeArgs *args,
const int64_t ref_best_rd) {
const AV1_COMMON *cm = &cpi->common;
(void)cm;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = xd->mi[0];
MB_MODE_INFO *mbmi = &mi->mbmi;
......@@ -9546,7 +9545,7 @@ static int64_t handle_inter_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
for (j = 0; j < INTERINTRA_MODES; ++j) {
mbmi->interintra_mode = (INTERINTRA_MODE)j;
rmode = interintra_mode_cost[mbmi->interintra_mode];
av1_build_intra_predictors_for_interintra(xd, bsize, 0, &orig_dst,
av1_build_intra_predictors_for_interintra(cm, xd, bsize, 0, &orig_dst,
intrapred, bw);
av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
......@@ -9559,7 +9558,7 @@ static int64_t handle_inter_mode(const AV1_COMP *const cpi, MACROBLOCK *x,
}
mbmi->interintra_mode = best_interintra_mode;
rmode = interintra_mode_cost[mbmi->interintra_mode];
av1_build_intra_predictors_for_interintra(xd, bsize, 0, &orig_dst,
av1_build_intra_predictors_for_interintra(cm, xd, bsize, 0, &orig_dst,
intrapred, bw);
av1_combine_interintra(xd, bsize, 0, tmp_buf, bw, intrapred, bw);
av1_subtract_plane(x, bsize, 0);
......
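For callers, the visible change is the extra cm argument threaded into the
prediction entry points. A hedged usage sketch follows (the wrapper function
is hypothetical; the av1_predict_intra_block signature matches the header
change above):

// Hypothetical caller, shown only to illustrate the new calling convention:
// the AV1_COMMON pointer is passed through so the predictor can consult
// cm->sb_size when deciding top-right / bottom-left availability.
static void example_predict_luma(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                 BLOCK_SIZE bsize, PREDICTION_MODE mode,
                                 uint8_t *dst, int dst_stride) {
  av1_predict_intra_block(cm, xd, block_size_wide[bsize],
                          block_size_high[bsize], bsize, mode, dst, dst_stride,
                          dst, dst_stride, /*col_off=*/0, /*row_off=*/0,
                          /*plane=*/0);
}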