Commit 6ea917ec authored by Debargha Mukherjee

Remove CONFIG_CB4X4 config flags

Since CB4X4 has been adopted and the codec no longer works without it,
remove the flag and simplify the code.

Change-Id: I51019312846928069727967e3b2bbb60f0fba80d
parent a3d4fe50
......@@ -25,13 +25,8 @@ int av1_get_MBs(int width, int height) {
const int mi_cols = aligned_width >> MI_SIZE_LOG2;
const int mi_rows = aligned_height >> MI_SIZE_LOG2;
#if CONFIG_CB4X4
const int mb_cols = (mi_cols + 2) >> 2;
const int mb_rows = (mi_rows + 2) >> 2;
#else
const int mb_cols = (mi_cols + 1) >> 1;
const int mb_rows = (mi_rows + 1) >> 1;
#endif
return mb_rows * mb_cols;
}
......@@ -48,13 +43,8 @@ void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
cm->mi_rows = aligned_height >> MI_SIZE_LOG2;
cm->mi_stride = calc_mi_size(cm->mi_cols);
#if CONFIG_CB4X4
cm->mb_cols = (cm->mi_cols + 2) >> 2;
cm->mb_rows = (cm->mi_rows + 2) >> 2;
#else
cm->mb_cols = (cm->mi_cols + 1) >> 1;
cm->mb_rows = (cm->mi_rows + 1) >> 1;
#endif
cm->MBs = cm->mb_rows * cm->mb_cols;
}
......
......@@ -2981,13 +2981,6 @@ static void set_lpf_parameters(
}
}
#if !CONFIG_CB4X4
// prepare internal edge parameters
if (curr_level && !curr_skipped) {
params->filter_length_internal = (TX_4X4 >= ts) ? (4) : (0);
}
#endif
// prepare common parameters
if (params->filter_length || params->filter_length_internal) {
const loop_filter_thresh *const limits = cm->lf_info.lfthr + level;
......@@ -3474,9 +3467,6 @@ void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
int mi_row, mi_col;
int plane;
#if CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES || \
CONFIG_CB4X4
#if !CONFIG_PARALLEL_DEBLOCKING
#if CONFIG_VAR_TX
for (int i = 0; i < MAX_MB_PLANE; ++i)
......@@ -3521,75 +3511,7 @@ void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
}
}
}
#endif // CONFIG_PARALLEL_DEBLOCKING
#else // CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
#if CONFIG_PARALLEL_DEBLOCKING
for (mi_row = start; mi_row < stop; mi_row += MAX_MIB_SIZE) {
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
av1_setup_dst_planes(planes, cm->sb_size, frame_buffer, mi_row, mi_col);
// filter all vertical edges in every 64x64 super block
for (plane = plane_start; plane < plane_end; plane += 1) {
av1_filter_block_plane_vert(cm, plane, &planes[plane], mi_row, mi_col);
}
}
}
for (mi_row = start; mi_row < stop; mi_row += MAX_MIB_SIZE) {
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
av1_setup_dst_planes(planes, cm->sb_size, frame_buffer, mi_row, mi_col);
// filter all horizontal edges in every 64x64 super block
for (plane = plane_start; plane < plane_end; plane += 1) {
av1_filter_block_plane_horz(cm, plane, &planes[plane], mi_row, mi_col);
}
}
}
#else // CONFIG_PARALLEL_DEBLOCKING
enum lf_path path;
LOOP_FILTER_MASK lfm;
if (y_only)
path = LF_PATH_444;
else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1)
path = LF_PATH_420;
else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0)
path = LF_PATH_444;
else
path = LF_PATH_SLOW;
for (mi_row = start; mi_row < stop; mi_row += MAX_MIB_SIZE) {
MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MAX_MIB_SIZE) {
av1_setup_dst_planes(planes, cm->sb_size, frame_buffer, mi_row, mi_col);
// TODO(JBB): Make setup_mask work for non 420.
av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
av1_filter_block_plane_ss00_ver(cm, &planes[0], mi_row, &lfm);
av1_filter_block_plane_ss00_hor(cm, &planes[0], mi_row, &lfm);
for (plane = 1; plane < num_planes; ++plane) {
switch (path) {
case LF_PATH_420:
av1_filter_block_plane_ss11_ver(cm, &planes[plane], mi_row, &lfm);
av1_filter_block_plane_ss11_hor(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_444:
av1_filter_block_plane_ss00_ver(cm, &planes[plane], mi_row, &lfm);
av1_filter_block_plane_ss00_hor(cm, &planes[plane], mi_row, &lfm);
break;
case LF_PATH_SLOW:
av1_filter_block_plane_non420_ver(cm, &planes[plane], mi + mi_col,
mi_row, mi_col, plane);
av1_filter_block_plane_non420_hor(cm, &planes[plane], mi + mi_col,
mi_row, mi_col, plane);
break;
}
}
}
}
#endif // CONFIG_PARALLEL_DEBLOCKING
#endif // CONFIG_VAR_TX || CONFIG_EXT_PARTITION || CONFIG_EXT_PARTITION_TYPES
#endif // !CONFIG_PARALLEL_DEBLOCKING
}
void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
......
......@@ -178,15 +178,10 @@ void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
int plane;
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
#if CONFIG_CB4X4
if (!is_chroma_reference(mi_row, mi_col, bsize,
xd->plane[plane].subsampling_x,
xd->plane[plane].subsampling_y))
continue;
#else
(void)mi_row;
(void)mi_col;
#endif
av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
}
}
......@@ -200,11 +195,7 @@ void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
ENTROPY_CONTEXT *const l = pd->left_context + loff;
const int txs_wide = tx_size_wide_unit[tx_size];
const int txs_high = tx_size_high_unit[tx_size];
#if CONFIG_CB4X4
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
#else
const BLOCK_SIZE bsize = AOMMAX(xd->mi[0]->mbmi.sb_type, BLOCK_8X8);
#endif
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
// above
......@@ -240,17 +231,11 @@ void av1_reset_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
int i;
int nplanes;
#if CONFIG_CB4X4
int chroma_ref;
chroma_ref =
is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
xd->plane[1].subsampling_y);
nplanes = 1 + (MAX_MB_PLANE - 1) * chroma_ref;
#else
(void)mi_row;
(void)mi_col;
nplanes = MAX_MB_PLANE;
#endif
for (i = 0; i < nplanes; i++) {
struct macroblockd_plane *const pd = &xd->plane[i];
#if CONFIG_CHROMA_SUB8X8
......
......@@ -478,12 +478,8 @@ static INLINE int is_intrabc_block(const MB_MODE_INFO *mbmi) {
#endif
static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
#if CONFIG_CB4X4
(void)block;
return mi->mbmi.mode;
#else
return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode : mi->mbmi.mode;
#endif
}
#if CONFIG_CFL
......@@ -687,7 +683,6 @@ typedef struct cfl_ctx {
// Whether the reconstructed luma pixels need to be stored
int store_y;
#if CONFIG_CB4X4
int is_chroma_reference;
#if CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
// The prediction used for sub8x8 blocks originates from multiple luma blocks,
......@@ -695,7 +690,6 @@ typedef struct cfl_ctx {
// each luma block
uint8_t sub8x8_val[CFL_SUB8X8_VAL_MI_SQUARE];
#endif // CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG
#endif // CONFIG_CB4X4
} CFL_CTX;
#endif // CONFIG_CFL
......@@ -875,7 +869,7 @@ static INLINE int is_rect_tx(TX_SIZE tx_size) { return tx_size >= TX_SIZES; }
#endif // CONFIG_RECT_TX
static INLINE int block_signals_txsize(BLOCK_SIZE bsize) {
#if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
#if (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
return bsize > BLOCK_4X4;
#else
return bsize >= BLOCK_8X8;
......@@ -975,12 +969,12 @@ static INLINE TxSetType get_ext_tx_set_type(TX_SIZE tx_size, BLOCK_SIZE bs,
int is_inter, int use_reduced_set) {
const TX_SIZE tx_size_sqr_up = txsize_sqr_up_map[tx_size];
const TX_SIZE tx_size_sqr = txsize_sqr_map[tx_size];
#if CONFIG_CB4X4 && USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4
#if USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4
(void)bs;
if (tx_size_sqr_up > TX_32X32) return EXT_TX_SET_DCTONLY;
#else
if (tx_size_sqr_up > TX_32X32 || bs < BLOCK_8X8) return EXT_TX_SET_DCTONLY;
#endif
#endif // USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4
if (use_reduced_set)
return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DTT4_IDTX;
#if CONFIG_MRC_TX
......@@ -1173,13 +1167,8 @@ static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode,
#endif // (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
(void)is_inter;
#if CONFIG_VAR_TX && CONFIG_RECT_TX
#if CONFIG_CB4X4
if (bsize == BLOCK_4X4)
return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
#else
if (bsize < BLOCK_8X8)
return AOMMIN(max_txsize_lookup[bsize], largest_tx_size);
#endif
if (txsize_sqr_map[max_rect_tx_size] <= largest_tx_size)
return max_rect_tx_size;
else
......@@ -1297,7 +1286,6 @@ static INLINE TX_TYPE av1_get_tx_type(PLANE_TYPE plane_type,
if (xd->lossless[mbmi->segment_id] || txsize_sqr_map[tx_size] > TX_32X32 ||
(txsize_sqr_map[tx_size] >= TX_32X32 && !is_inter_block(mbmi)))
return DCT_DCT;
if (mbmi->sb_type >= BLOCK_8X8 || CONFIG_CB4X4) {
if (plane_type == PLANE_TYPE_Y) {
#if !ALLOW_INTRA_EXT_TX
if (is_inter_block(mbmi))
......@@ -1311,23 +1299,9 @@ static INLINE TX_TYPE av1_get_tx_type(PLANE_TYPE plane_type,
? DCT_DCT
: mbmi->tx_type;
}
}
#if CONFIG_CB4X4
(void)block;
return intra_mode_to_tx_type_context[get_uv_mode(mbmi->uv_mode)];
#else // CONFIG_CB4X4
// Sub8x8-Inter/Intra OR UV-Intra
if (is_inter_block(mbmi)) { // Sub8x8-Inter
return DCT_DCT;
} else { // Sub8x8 Intra OR UV-Intra
const int block_raster_idx =
av1_block_index_to_raster_order(tx_size, block);
return intra_mode_to_tx_type_context[plane_type == PLANE_TYPE_Y
? get_y_mode(mi, block_raster_idx)
: get_uv_mode(mbmi->uv_mode)];
}
#endif // CONFIG_CB4X4
#else // CONFIG_EXT_TX
(void)block;
#if CONFIG_MRC_TX
......@@ -1463,14 +1437,8 @@ static INLINE int is_interintra_pred(const MB_MODE_INFO *mbmi) {
#if CONFIG_VAR_TX
static INLINE int get_vartx_max_txsize(const MB_MODE_INFO *const mbmi,
BLOCK_SIZE bsize, int subsampled) {
#if CONFIG_CB4X4
(void)mbmi;
TX_SIZE max_txsize = max_txsize_rect_lookup[bsize];
#else
TX_SIZE max_txsize = mbmi->sb_type < BLOCK_8X8
? max_txsize_rect_lookup[mbmi->sb_type]
: max_txsize_rect_lookup[bsize];
#endif // CONFIG_C4X4
#if CONFIG_EXT_PARTITION && CONFIG_TX64X64
// The decoder is designed so that it can process 64x64 luma pixels at a
......@@ -1662,11 +1630,7 @@ static INLINE int is_nontrans_global_motion(const MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
int ref;
#if CONFIG_CB4X4
const int unify_bsize = 1;
#else
const int unify_bsize = 0;
#endif
// First check if all modes are ZEROMV
if (mbmi->sb_type >= BLOCK_8X8 || unify_bsize) {
......
......@@ -61,7 +61,6 @@ static const uint8_t b_height_log2_lookup[BLOCK_SIZES_ALL] = {
};
// Log 2 conversion lookup tables for modeinfo width and height
static const uint8_t mi_width_log2_lookup[BLOCK_SIZES_ALL] = {
#if CONFIG_CB4X4
#if CONFIG_CHROMA_SUB8X8
0, 0,
0,
......@@ -76,21 +75,8 @@ static const uint8_t mi_width_log2_lookup[BLOCK_SIZES_ALL] = {
2, 1,
3, 2,
4, IF_EXT_PARTITION(3, 5)
#else // CONFIG_CB4X4
0, 0,
0, 0,
0, 1,
1, 1,
2, 2,
2, 3,
3, IF_EXT_PARTITION(3, 4, 4) 0,
1, 0,
2, 1,
3, IF_EXT_PARTITION(2, 4)
#endif
};
static const uint8_t mi_height_log2_lookup[BLOCK_SIZES_ALL] = {
#if CONFIG_CB4X4
#if CONFIG_CHROMA_SUB8X8
0, 0,
0,
......@@ -105,44 +91,22 @@ static const uint8_t mi_height_log2_lookup[BLOCK_SIZES_ALL] = {
0, 3,
1, 4,
2, IF_EXT_PARTITION(5, 3)
#else // CONFIG_CB4X4
0, 0,
0, 0,
1, 0,
1, 2,
1, 2,
3, 2,
3, IF_EXT_PARTITION(4, 3, 4) 1,
0, 2,
0, 3,
1, IF_EXT_PARTITION(2, 4)
#endif
};
/* clang-format off */
static const uint8_t mi_size_wide[BLOCK_SIZES_ALL] = {
#if CONFIG_CB4X4
#if CONFIG_CHROMA_SUB8X8
1, 1, 1,
#endif
1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16,
IF_EXT_PARTITION(16, 32, 32) 1, 4, 2, 8, 4, 16, IF_EXT_PARTITION(8, 32)
#else // CONFIG_CB4X4
1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, IF_EXT_PARTITION(8, 16, 16) 1, 2, 1, 4,
2, 8, IF_EXT_PARTITION(4, 16)
#endif
};
static const uint8_t mi_size_high[BLOCK_SIZES_ALL] = {
#if CONFIG_CB4X4
#if CONFIG_CHROMA_SUB8X8
1, 1, 1,
#endif
1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16,
IF_EXT_PARTITION(32, 16, 32) 4, 1, 8, 2, 16, 4, IF_EXT_PARTITION(32, 8)
#else // CONFIG_CB4X4
1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8, IF_EXT_PARTITION(16, 8, 16) 2, 1, 4, 1,
8, 2, IF_EXT_PARTITION(16, 4)
#endif
};
/* clang-format on */
......@@ -748,7 +712,6 @@ static const TX_TYPE_1D htx_tab[TX_TYPES] = {
// block which may use a rectangular transform, in which case it is
// "(max_txsize_lookup[bsize] + 1) - TX_8X8", invalid for bsize < 8X8
static const int32_t intra_tx_size_cat_lookup[BLOCK_SIZES_ALL] = {
#if CONFIG_CB4X4
#if CONFIG_CHROMA_SUB8X8
// 2X2, 2X4, 4X2,
INT32_MIN, INT32_MIN, INT32_MIN,
......@@ -757,12 +720,6 @@ static const int32_t intra_tx_size_cat_lookup[BLOCK_SIZES_ALL] = {
INT32_MIN,
// 4X8, 8X4, 8X8,
TX_8X8 - TX_8X8, TX_8X8 - TX_8X8, TX_8X8 - TX_8X8,
#else // CONFIG_CB4X4
// 4X4
INT32_MIN,
// 4X8, 8X4, 8X8
INT32_MIN, INT32_MIN, TX_8X8 - TX_8X8,
#endif // CONFIG_CB4X4
// 8X16, 16X8, 16X16
TX_16X16 - TX_8X8, TX_16X16 - TX_8X8, TX_16X16 - TX_8X8,
// 16X32, 32X16, 32X32
......@@ -1098,15 +1055,11 @@ static const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES_ALL][2][2] = {
{ { BLOCK_4X4, BLOCK_4X2 }, { BLOCK_2X4, BLOCK_2X2 } },
{ { BLOCK_4X8, BLOCK_4X4 }, { BLOCK_INVALID, BLOCK_2X4 } },
{ { BLOCK_8X4, BLOCK_INVALID }, { BLOCK_4X4, BLOCK_4X2 } },
#elif CONFIG_CB4X4
#else
{ { BLOCK_4X4, BLOCK_4X4 }, { BLOCK_4X4, BLOCK_4X4 } },
{ { BLOCK_4X8, BLOCK_4X4 }, { BLOCK_INVALID, BLOCK_4X4 } },
{ { BLOCK_8X4, BLOCK_INVALID }, { BLOCK_4X4, BLOCK_4X4 } },
#else
{ { BLOCK_4X4, BLOCK_INVALID }, { BLOCK_INVALID, BLOCK_INVALID } },
{ { BLOCK_4X8, BLOCK_4X4 }, { BLOCK_INVALID, BLOCK_INVALID } },
{ { BLOCK_8X4, BLOCK_INVALID }, { BLOCK_4X4, BLOCK_INVALID } },
#endif
#endif // CONFIG_CHROMA_SUB8X8
{ { BLOCK_8X8, BLOCK_8X4 }, { BLOCK_4X8, BLOCK_4X4 } },
{ { BLOCK_8X16, BLOCK_8X8 }, { BLOCK_INVALID, BLOCK_4X8 } },
{ { BLOCK_16X8, BLOCK_INVALID }, { BLOCK_8X8, BLOCK_8X4 } },
......
......@@ -45,11 +45,7 @@ extern "C" {
#define MIN_SB_SIZE_LOG2 6
// Pixels per Mode Info (MI) unit
#if CONFIG_CB4X4
#define MI_SIZE_LOG2 2
#else
#define MI_SIZE_LOG2 3
#endif
#define MI_SIZE (1 << MI_SIZE_LOG2)
// MI-units per max superblock (MI Block - MIB)
......
......@@ -244,7 +244,7 @@ static INLINE int_mv gm_get_motion_vector(const WarpedMotionParams *gm,
int is_integer
#endif
) {
const int unify_bsize = CONFIG_CB4X4;
const int unify_bsize = 1;
int_mv res;
const int32_t *mat = gm->wmmat;
int x, y, tx, ty;
......
......@@ -65,11 +65,7 @@ static uint8_t add_ref_mv_candidate(
int subsampling_y) {
int index = 0, ref;
int newmv_count = 0;
#if CONFIG_CB4X4
const int unify_bsize = 1;
#else
const int unify_bsize = 0;
#endif
assert(weight % 2 == 0);
#if !CONFIG_EXT_WARPED_MOTION
(void)bsize;
......@@ -274,7 +270,6 @@ static uint8_t scan_row_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int i;
uint8_t newmv_count = 0;
int col_offset = 0;
#if CONFIG_CB4X4
const int shift = 0;
// TODO(jingning): Revisit this part after cb4x4 is stable.
if (abs(row_offset) > 1) {
......@@ -282,10 +277,6 @@ static uint8_t scan_row_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
if (mi_col & 0x01 && xd->n8_w < n8_w_8) --col_offset;
}
const int use_step_16 = (xd->n8_w >= 16);
#else
const int shift = 1;
const int use_step_16 = (xd->n8_w >= 8);
#endif
MODE_INFO **const candidate_mi0 = xd->mi + row_offset * xd->mi_stride;
for (i = 0; i < end_mi && *refmv_count < MAX_REF_MV_STACK_SIZE;) {
......@@ -350,17 +341,12 @@ static uint8_t scan_col_mbmi(const AV1_COMMON *cm, const MACROBLOCKD *xd,
int i;
uint8_t newmv_count = 0;
int row_offset = 0;
#if CONFIG_CB4X4
const int shift = 0;
if (abs(col_offset) > 1) {
row_offset = 1;
if (mi_row & 0x01 && xd->n8_h < n8_h_8) --row_offset;
}
const int use_step_16 = (xd->n8_h >= 16);
#else
const int shift = 1;
const int use_step_16 = (xd->n8_h >= 8);
#endif
for (i = 0; i < end_mi && *refmv_count < MAX_REF_MV_STACK_SIZE;) {
const MODE_INFO *const candidate_mi =
......@@ -751,10 +737,8 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile = &xd->tile;
int max_row_offset = 0, max_col_offset = 0;
#if CONFIG_CB4X4
const int row_adj = (xd->n8_h < mi_size_high[BLOCK_8X8]) && (mi_row & 0x01);
const int col_adj = (xd->n8_w < mi_size_wide[BLOCK_8X8]) && (mi_col & 0x01);
#endif
int processed_rows = 0;
int processed_cols = 0;
int row_offset, col_offset;
......@@ -765,21 +749,13 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
// Find valid maximum row/col offset.
if (xd->up_available) {
#if CONFIG_CB4X4
max_row_offset = -(MVREF_ROWS << 1) + row_adj;
#else
max_row_offset = -MVREF_ROWS;
#endif
max_row_offset =
find_valid_row_offset(tile, mi_row, cm->mi_rows, cm, max_row_offset);
}
if (xd->left_available) {
#if CONFIG_CB4X4
max_col_offset = -(MVREF_COLS << 1) + col_adj;
#else
max_col_offset = -MVREF_COLS;
#endif
max_col_offset = find_valid_col_offset(tile, mi_col, max_col_offset);
}
......@@ -860,13 +836,9 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
{
int blk_row, blk_col;
int coll_blk_count = 0;
#if CONFIG_CB4X4
const int mi_step = (xd->n8_w == 1 || xd->n8_h == 1)
? mi_size_wide[BLOCK_8X8]
: mi_size_wide[BLOCK_16X16];
#else
const int mi_step = mi_size_wide[BLOCK_16X16];
#endif
#if CONFIG_TPL_MV
// Modified sample positions to be consistent with frame_mvs
......@@ -929,13 +901,8 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
#endif // CONFIG_GLOBAL_MOTION && USE_CUR_GM_REFMV
refmv_count);
for (idx = 2; idx <= MVREF_ROWS; ++idx) {
#if CONFIG_CB4X4
row_offset = -(idx << 1) + 1 + row_adj;
col_offset = -(idx << 1) + 1 + col_adj;
#else
row_offset = -idx;
col_offset = -idx;
#endif
if (abs(row_offset) <= abs(max_row_offset) &&
abs(row_offset) > processed_rows)
......@@ -956,11 +923,7 @@ static void setup_ref_mv_list(const AV1_COMMON *cm, const MACROBLOCKD *xd,
max_col_offset, &processed_cols);
}
#if CONFIG_CB4X4
col_offset = -(MVREF_COLS << 1) + 1 + col_adj;
#else
col_offset = -MVREF_COLS;
#endif
if (abs(col_offset) <= abs(max_col_offset) &&
abs(col_offset) > processed_cols)
scan_col_mbmi(cm, xd, mi_row, mi_col, block, rf, col_offset, ref_mv_stack,
......@@ -1138,12 +1101,10 @@ static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
mv_ref_search[8].row = num_8x8_blocks_high - 1;
mv_ref_search[8].col = -3;
#if CONFIG_CB4X4
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
mv_ref_search[i].row *= 2;
mv_ref_search[i].col *= 2;
}
#endif // CONFIG_CB4X4
// The nearest 2 blocks are treated differently
// if the size < 8x8 we get the mv from the bmi substructure,
......
......@@ -353,13 +353,8 @@ static INLINE int16_t av1_mode_context_analyzer(
if (block >= 0) {
mode_ctx = mode_context[rf[0]] & 0x00ff;
#if !CONFIG_CB4X4
if (block > 0 && bsize < BLOCK_8X8 && bsize > BLOCK_4X4)
mode_ctx |= (1 << SKIP_NEARESTMV_SUB8X8_OFFSET);
#else
(void)block;
(void)bsize;
#endif
return mode_ctx;
}
......
......@@ -887,7 +887,6 @@ static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row,
#endif // CONFIG_EXT_PARTITION_TYPES
}
#if CONFIG_CB4X4
static INLINE int is_chroma_reference(int mi_row, int mi_col, BLOCK_SIZE bsize,
int subsampling_x, int subsampling_y) {
#if CONFIG_CHROMA_SUB8X8
......@@ -923,7 +922,6 @@ static INLINE BLOCK_SIZE scale_chroma_bsize(BLOCK_SIZE bsize, int subsampling_x,
return bs;
}
#endif
static INLINE aom_cdf_prob cdf_element_prob(const aom_cdf_prob *cdf,
size_t element) {
......@@ -1338,11 +1336,7 @@ static INLINE void set_use_reference_buffer(AV1_COMMON *const cm, int use) {
static INLINE void set_sb_size(AV1_COMMON *const cm, BLOCK_SIZE sb_size) {
cm->sb_size = sb_size;
cm->mib_size = mi_size_wide[cm->sb_size];
#if CONFIG_CB4X4
cm->mib_size_log2 = b_width_log2_lookup[cm->sb_size];
#else
cm->mib_size_log2 = mi_width_log2_lookup[cm->sb_size];
#endif
}
static INLINE int all_lossless(const AV1_COMMON *cm, const MACROBLOCKD *xd) {
......
......@@ -1133,10 +1133,8 @@ static INLINE void build_inter_predictors(
#endif // CONFIG_COMPOUND_SINGLEREF
#endif // CONFIG_GLOBAL_MOTION
#if CONFIG_CB4X4
(void)block;
(void)cm;
#endif
#if CONFIG_CHROMA_SUB8X8
const BLOCK_SIZE bsize = mi->mbmi.sb_type;
......@@ -1371,19 +1369,7 @@ static INLINE void build_inter_predictors(
const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
struct buf_2d *const pre_buf = &pd->pre[ref];
#endif // CONFIG_INTRABC
#if CONFIG_CB4X4
const MV mv = mi->mbmi.mv[ref].as_mv;
#else
const MV mv =
#if CONFIG_MOTION_VAR
(mi->mbmi.sb_type < BLOCK_8X8 && !build_for_obmc)
?
#else
mi->mbmi.sb_type < BLOCK_8X8 ?
#endif
average_split_mvs(pd, mi, ref, block)
: mi->mbmi.mv[ref].as_mv;
#endif
const int is_scaled = av1_is_scaled(sf);
if (is_scaled) {
......@@ -1536,21 +1522,15 @@ static void build_inter_predictors_for_planes(const AV1_COMMON *cm,
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
#if CONFIG_CB4X4
const int unify_bsize = 1;
#else
const int unify_bsize = 0;
#endif
for (plane = plane_from; plane <= plane_to; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
const int bw = pd->width;
const int bh = pd->height;
#if CONFIG_CB4X4
if (!is_chroma_reference(mi_row, mi_col, bsize, pd->subsampling_x,
pd->subsampling_y))
continue;
#endif
if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8 && !unify_bsize) {
const PARTITION_TYPE bp = bsize - xd->mi[0]->mbmi.sb_type;
......@@ -2248,32 +2228,10 @@ void av1_build_prediction_by_bottom_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
const struct macroblockd_plane *pd = &xd->plane[j];
bw = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_x;
bh = (xd->n8_h << (MI_SIZE_LOG2 - 1)) >> pd->subsampling_y;
if (mbmi->sb_type < BLOCK_8X8 && !CONFIG_CB4X4) {
const PARTITION_TYPE bp = BLOCK_8X8 - mbmi->sb_type;
const int have_vsplit = bp != PARTITION_HORZ;
const int have_hsplit = bp != PARTITION_VERT;
const int num_4x4_w = 2 >> (!have_vsplit);
const int num_4x4_h = 2 >> (!have_hsplit);
const int pw = 8 >> (have_vsplit + pd->subsampling_x);
int x, y;
for (y = 0; y < num_4x4_h; ++y)
for (x = 0; x < num_4x4_w; ++x) {
if ((bp == PARTITION_HORZ || bp == PARTITION_SPLIT) && y != 0)
continue;
build_inter_predictors(cm, xd, j, mi, 1, y * 2 + x, bw, bh,
(4 * x) >> pd->subsampling_x,
xd->n8_h == 1 ? (4 >> pd->subsampling_y) : 0,
pw, bh, mi_x, mi_y);
}
} else {
build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh, 0,
xd->n8_h == 1 ? (4 >> pd->subsampling_y) : 0, bw,
bh, mi_x, mi_y);
}
}
*mbmi = backup_mbmi;
}
xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
......@@ -2346,31 +2304,9 @@ void av1_build_prediction_by_right_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
const struct macroblockd_plane *pd = &xd->plane[j];
bw = (xd->n8_w << (MI_SIZE_LOG2 - 1)) >> pd->subsampling_x;
bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y;
if (mbmi->sb_type < BLOCK_8X8 && !CONFIG_CB4X4) {
const PARTITION_TYPE bp = BLOCK_8X8 - mbmi->sb_type;
const int have_vsplit = bp != PARTITION_HORZ;
const int have_hsplit = bp != PARTITION_VERT;
const int num_4x4_w = 2 >> (!have_vsplit);
const int num_4x4_h = 2 >> (!have_hsplit);
const int ph = 8 >> (have_hsplit + pd->subsampling_y);
int x, y;
for (y = 0; y < num_4x4_h; ++y)
for (x = 0; x < num_4x4_w; ++x) {
if ((bp == PARTITION_VERT || bp == PARTITION_SPLIT) && x != 0)
continue;
build_inter_predictors(cm, xd, j, mi, 1, y * 2 + x, bw, bh,
xd->n8_w == 1 ? 4 >> pd->subsampling_x : 0,
(4 * y) >> pd->subsampling_y, bw, ph, mi_x,
mi_y);
}
} else {
build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh,
xd->n8_w == 1 ? 4 >> pd->subsampling_x : 0, 0,
bw, bh, mi_x, mi_y);
}