Commit c27fcccc authored by Urvang Joshi

Code cleanup: mainly rd_pick_partition and methods called from there.

- Const correctness
- Refactoring
- Make variables local when possible, etc.
- Remove -Wcast-qual to allow explicitly casting away const.

Change-Id: I6ecb7d345162dc08ccdd17095b0800fb3a00cf2f
parent b910c0bd
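The last bullet is the key enabler for this cleanup: once the encoder context is passed as a const pointer everywhere, the one statistics counter that still has to be written (mode_chosen_counts, in the update_state() hunk below) needs an explicit cast, which -Wcast-qual would have flagged. A minimal sketch of that pattern, with hypothetical names (not code from this commit):

/* Sketch only; `Encoder` and `record_choice` are invented for illustration.
 * A const-qualified context keeps every callee read-only; the single
 * statistics counter that must be mutated casts the qualifier away
 * explicitly, which is why -Wcast-qual was dropped. */
typedef struct Encoder {
  unsigned int mode_counts[10];
} Encoder;

static void record_choice(const Encoder *enc, int mode) {
  unsigned int *const counts = (unsigned int *)enc->mode_counts;  // Cast const away.
  ++counts[mode];
}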
@@ -242,7 +242,7 @@ typedef struct macroblockd {
   FRAME_CONTEXT *fc;
   /* pointers to reference frames */
-  RefBuffer *block_refs[2];
+  const RefBuffer *block_refs[2];
   /* pointer to current frame */
   const YV12_BUFFER_CONFIG *cur_buf;
......
@@ -673,8 +673,8 @@ void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
   *near_mv = mvlist[1];
 }
-void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
-                                   int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+                                   int block, int ref, int mi_row, int mi_col,
                                    int_mv *nearest_mv, int_mv *near_mv) {
   int_mv mv_list[MAX_MV_REF_CANDIDATES];
   MODE_INFO *const mi = xd->mi[0];
......
@@ -435,8 +435,8 @@ void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
 void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
                            int_mv *near_mv);
-void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
-                                   int ref, int mi_row, int mi_col,
+void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+                                   int block, int ref, int mi_row, int mi_col,
                                    int_mv *nearest_mv, int_mv *near_mv);
 #ifdef __cplusplus
......
@@ -355,7 +355,8 @@ static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
   return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
 }
-static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(AV1_COMMON *cm) {
+static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(
+    const AV1_COMMON *const cm) {
   return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
 }
......
@@ -20,7 +20,7 @@
 extern "C" {
 #endif
-static INLINE int get_segment_id(const AV1_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *const cm,
                                  const uint8_t *segment_ids, BLOCK_SIZE bsize,
                                  int mi_row, int mi_col) {
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
......
@@ -407,7 +407,7 @@ void av1_setup_obmc_mask(int length, const uint8_t *mask[2]) {
 // top/left neighboring blocks' inter predictors with the regular inter
 // prediction. We assume the original prediction (bmc) is stored in
 // xd->plane[].dst.buf
-void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      int use_tmp_dst_buf,
                                      uint8_t *final_buf[MAX_MB_PLANE],
@@ -573,7 +573,7 @@ void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
   }  // each mi in the left column
 }
-void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          uint8_t *tmp_buf[MAX_MB_PLANE],
                                          const int tmp_stride[MAX_MB_PLANE]) {
@@ -600,8 +600,8 @@ void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
                          pd->subsampling_x, pd->subsampling_y);
   }
   for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
-    MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
-    RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+    const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+    const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
     xd->block_refs[ref] = ref_buf;
     if ((!av1_is_valid_scale(&ref_buf->sf)))
@@ -649,7 +649,7 @@ void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
   xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
 }
-void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                         int mi_row, int mi_col,
                                         uint8_t *tmp_buf[MAX_MB_PLANE],
                                         const int tmp_stride[MAX_MB_PLANE]) {
@@ -676,8 +676,8 @@ void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
                          pd->subsampling_x, pd->subsampling_y);
   }
   for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
-    MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
-    RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+    const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+    const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
     xd->block_refs[ref] = ref_buf;
     if ((!av1_is_valid_scale(&ref_buf->sf)))
@@ -725,7 +725,7 @@ void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
   xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8);
 }
-void av1_build_obmc_inter_predictors_sb(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                         int mi_row, int mi_col) {
 #if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint8_t, tmp_buf1[2 * MAX_MB_PLANE * MAX_SB_SQUARE]);
......
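For context on the comment above av1_build_obmc_inter_prediction: overlapped block motion compensation blends the block's own inter prediction with predictions built from the top/left neighbors' motion. A hedged sketch of one blended row, assuming complementary 6-bit weights that sum to 64 (the real tables come from av1_setup_obmc_mask and may differ):

#include <stdint.h>

/* Illustrative only: w0[i] + w1[i] == 64 is an assumption, not the
 * codec's exact mask values. dst holds the regular prediction and is
 * overwritten with the blend, as the comment above describes. */
static void obmc_blend_row(uint8_t *dst, const uint8_t *neighbor_pred,
                           const uint8_t *w0, const uint8_t *w1, int width) {
  int i;
  for (i = 0; i < width; ++i)
    dst[i] = (uint8_t)((w0[i] * dst[i] + w1[i] * neighbor_pred[i] + 32) >> 6);
}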
@@ -198,7 +198,7 @@ void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
 #if CONFIG_MOTION_VAR
 void av1_setup_obmc_mask(int length, const uint8_t *mask[2]);
-void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      int use_tmp_dst_buf,
                                      uint8_t *final_buf[MAX_MB_PLANE],
@@ -207,15 +207,15 @@ void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
                                      const int above_pred_stride[MAX_MB_PLANE],
                                      uint8_t *left_pred_buf[MAX_MB_PLANE],
                                      const int left_pred_stride[MAX_MB_PLANE]);
-void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          uint8_t *tmp_buf[MAX_MB_PLANE],
                                          const int tmp_stride[MAX_MB_PLANE]);
-void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                         int mi_row, int mi_col,
                                         uint8_t *tmp_buf[MAX_MB_PLANE],
                                         const int tmp_stride[MAX_MB_PLANE]);
-void av1_build_obmc_inter_predictors_sb(AV1_COMMON *cm, MACROBLOCKD *xd,
+void av1_build_obmc_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                         int mi_row, int mi_col);
 #endif  // CONFIG_MOTION_VAR
 static INLINE int has_subpel_mv_component(const MODE_INFO *const mi,
......
@@ -111,9 +111,9 @@ void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
 // Select a segment for the current block.
 // The choice of segment for a block depends on the ratio of the projected
 // bits for the block vs a target average and its spatial complexity.
-void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+void av1_caq_select_segment(const AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
                             int mi_row, int mi_col, int projected_rate) {
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
   const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
......
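The comment above av1_caq_select_segment describes a rate-ratio rule: blocks whose projected bits are high relative to a per-block target get steered toward a different AQ segment. A toy sketch of such a rule, with thresholds and segment ids invented for illustration:

static int caq_segment_sketch(int projected_rate, int target_rate,
                              double spatial_complexity) {
  /* Far more bits than target on a simple block -> coarser segment. */
  if (projected_rate > 2 * target_rate && spatial_complexity < 1.0) return 2;
  if (projected_rate > target_rate) return 1;
  return 0;  /* default segment */
}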
@@ -22,7 +22,7 @@ struct AV1_COMP;
 struct macroblock;
 // Select a segment for the current Block.
-void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
+void av1_caq_select_segment(const struct AV1_COMP *cpi, struct macroblock *,
                             BLOCK_SIZE bs, int mi_row, int mi_col,
                             int projected_rate);
......
@@ -209,7 +209,7 @@ int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(const AV1_COMP *cpi,
                                        MB_MODE_INFO *const mbmi, int mi_row,
                                        int mi_col, BLOCK_SIZE bsize,
                                        int64_t rate, int64_t dist, int skip) {
......
@@ -49,7 +49,7 @@ int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(const struct AV1_COMP *cpi,
                                        MB_MODE_INFO *const mbmi, int mi_row,
                                        int mi_col, BLOCK_SIZE bsize,
                                        int64_t rate, int64_t dist, int skip);
......
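As the comment in both hunks says, the function writes the (possibly reset) segment_id back into the segmentation map for the whole bsize region. A minimal sketch of that map update, with hypothetical names:

#include <stdint.h>

/* Sketch only: stamp segment_id over the mi_w x mi_h mode-info region
 * that the coded block covers, one byte per 8x8 mode-info unit. */
static void write_seg_map_sketch(uint8_t *seg_map, int map_stride, int mi_row,
                                 int mi_col, int mi_w, int mi_h,
                                 uint8_t segment_id) {
  int x, y;
  for (y = 0; y < mi_h; ++y)
    for (x = 0; x < mi_w; ++x)
      seg_map[(mi_row + y) * map_stride + (mi_col + x)] = segment_id;
}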
@@ -138,7 +138,7 @@ static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
 }
 #endif  // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(const AV1_COMP *const cpi, MACROBLOCK *x,
                                    BLOCK_SIZE bs) {
   MACROBLOCKD *xd = &x->e_mbd;
   unsigned int var, sse;
@@ -186,14 +186,14 @@ static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
   }
 }
-double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   unsigned int var = block_variance(cpi, x, bs);
   aom_clear_system_state();
   return log(var + 1.0);
 }
 #define DEFAULT_E_MIDPOINT 10.0
-int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   double energy;
   double energy_midpoint;
   aom_clear_system_state();
......
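av1_log_block_var maps raw variance into the log domain (log(var + 1.0) keeps var == 0 finite), and av1_block_energy, whose body is not shown here, centers that value on an energy midpoint such as DEFAULT_E_MIDPOINT. A sketch of the centering step; the clamp range is an assumption, not read from this commit:

#include <math.h>

static int block_energy_sketch(unsigned int var, double midpoint) {
  const double energy = log(var + 1.0) - midpoint;
  if (energy < -4.0) return -4;  /* assumed minimum energy */
  if (energy > 4.0) return 4;    /* assumed maximum energy */
  return (int)energy;
}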
@@ -21,8 +21,8 @@ extern "C" {
 unsigned int av1_vaq_segment_id(int energy);
 void av1_vaq_frame_setup(AV1_COMP *cpi);
-int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
 #ifdef __cplusplus
 }  // extern "C"
......
@@ -36,12 +36,12 @@ struct macroblock_plane {
   struct buf_2d src;
   // Quantizer setings
-  int16_t *quant_fp;
-  int16_t *round_fp;
-  int16_t *quant;
-  int16_t *quant_shift;
-  int16_t *zbin;
-  int16_t *round;
+  const int16_t *quant_fp;
+  const int16_t *round_fp;
+  const int16_t *quant;
+  const int16_t *quant_shift;
+  const int16_t *zbin;
+  const int16_t *round;
   int64_t quant_thred[2];
 };
......
@@ -47,9 +47,10 @@
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/tokenize.h"
-static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
-                              int output_enabled, int mi_row, int mi_col,
-                              BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
+static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
+                              TOKENEXTRA **t, int output_enabled, int mi_row,
+                              int mi_col, BLOCK_SIZE bsize,
+                              PICK_MODE_CONTEXT *ctx);
 // This is used as a reference when computing the source variance for the
 // purposes of activity masking.
@@ -97,7 +98,7 @@ static const uint16_t AV1_HIGH_VAR_OFFS_12[64] = {
 };
 #endif  // CONFIG_AOM_HIGHBITDEPTH
-unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(const AV1_COMP *cpi,
                                            const struct buf_2d *ref,
                                            BLOCK_SIZE bs) {
   unsigned int sse;
@@ -107,7 +108,7 @@ unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
 }
 #if CONFIG_AOM_HIGHBITDEPTH
-unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
+unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi,
                                                 const struct buf_2d *ref,
                                                 BLOCK_SIZE bs, int bd) {
   unsigned int var, sse;
@@ -133,7 +134,7 @@ unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
 }
 #endif  // CONFIG_AOM_HIGHBITDEPTH
-static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(const AV1_COMP *const cpi,
                                                    const struct buf_2d *ref,
                                                    int mi_row, int mi_col,
                                                    BLOCK_SIZE bs) {
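These helpers report variance per pixel: a whole-block variance is normalized by the block's pixel count, which is always a power of two, so the division reduces to a rounded shift by num_pels_log2_lookup[bs]. A sketch of that normalization, mirroring the ROUND_POWER_OF_TWO idiom (the helper name is invented):

static unsigned int per_pixel_variance_sketch(unsigned int block_var,
                                              int num_pels_log2) {
  /* Rounded divide by 2^num_pels_log2, i.e. by the block's pixel count. */
  return (block_var + (1u << (num_pels_log2 - 1))) >> num_pels_log2;
}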
@@ -164,21 +165,21 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(AV1_COMP *cpi, MACROBLOCK *x,
 // Lighter version of set_offsets that only sets the mode info
 // pointers.
-static INLINE void set_mode_info_offsets(AV1_COMP *const cpi,
+static INLINE void set_mode_info_offsets(const AV1_COMP *const cpi,
                                          MACROBLOCK *const x,
                                          MACROBLOCKD *const xd, int mi_row,
                                          int mi_col) {
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int idx_str = xd->mi_stride * mi_row + mi_col;
   xd->mi = cm->mi_grid_visible + idx_str;
   xd->mi[0] = cm->mi + idx_str;
   x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
 }
-static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(const AV1_COMP *const cpi, const TileInfo *const tile,
                         MACROBLOCK *const x, int mi_row, int mi_col,
                         BLOCK_SIZE bsize) {
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -875,11 +876,11 @@ static int choose_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
   return 0;
 }
-static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
-                         int mi_row, int mi_col, BLOCK_SIZE bsize,
-                         int output_enabled) {
+static void update_state(const AV1_COMP *const cpi, ThreadData *td,
+                         PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col,
+                         BLOCK_SIZE bsize, int output_enabled) {
   int i, x_idx, y;
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   RD_COUNTS *const rdc = &td->rd_counts;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -983,18 +984,22 @@ static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
   if (!output_enabled) return;
 #if CONFIG_INTERNAL_STATS
-  if (frame_is_intra_only(cm)) {
-    static const int kf_mode_index[] = {
-      THR_DC /*DC_PRED*/,          THR_V_PRED /*V_PRED*/,
-      THR_H_PRED /*H_PRED*/,       THR_D45_PRED /*D45_PRED*/,
-      THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
-      THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
-      THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
-    };
-    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
-  } else {
-    // Note how often each mode chosen as best
-    ++cpi->mode_chosen_counts[ctx->best_mode_index];
+  {
+    unsigned int *const mode_chosen_counts =
+        (unsigned int *)cpi->mode_chosen_counts;  // Cast const away.
+    if (frame_is_intra_only(cm)) {
+      static const int kf_mode_index[] = {
+        THR_DC /*DC_PRED*/,          THR_V_PRED /*V_PRED*/,
+        THR_H_PRED /*H_PRED*/,       THR_D45_PRED /*D45_PRED*/,
+        THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/,
+        THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/,
+        THR_D63_PRED /*D63_PRED*/,   THR_TM /*TM_PRED*/,
+      };
+      ++mode_chosen_counts[kf_mode_index[mbmi->mode]];
+    } else {
+      // Note how often each mode chosen as best
+      ++mode_chosen_counts[ctx->best_mode_index];
+    }
   }
 #endif
   if (!frame_is_intra_only(cm)) {
@@ -1053,21 +1058,21 @@ void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                      x->e_mbd.plane[i].subsampling_y);
 }
-static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(const AV1_COMP *const cpi, MACROBLOCK *const x,
                               int8_t segment_id) {
   int segment_qindex;
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   av1_init_plane_quantizers(cpi, x);
   aom_clear_system_state();
   segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
   return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
 }
-static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(const AV1_COMP *const cpi, TileDataEnc *tile_data,
                              MACROBLOCK *const x, int mi_row, int mi_col,
                              RD_COST *rd_cost, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
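set_segment_rdmult re-derives the RD multiplier (lambda) from the segment's effective qindex, so per-segment quantizer deltas change the rate/distortion trade-off as well. In libvpx-lineage encoders the multiplier grows roughly with the square of the quantizer step; a hedged sketch, with constants that are an assumption rather than read from this commit:

static int compute_rd_mult_sketch(int qstep) {
  return 88 * qstep * qstep / 24;  /* lambda ~ q^2, constants illustrative */
}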
@@ -1206,7 +1211,7 @@ static void update_inter_mode_stats(FRAME_COUNTS *counts, PREDICTION_MODE mode,
 }
 #endif
-static void update_stats(AV1_COMMON *cm, ThreadData *td) {
+static void update_stats(const AV1_COMMON *const cm, ThreadData *td) {
   const MACROBLOCK *x = &td->mb;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const MODE_INFO *const mi = xd->mi[0];
@@ -1403,8 +1408,8 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
          sizeof(xd->left_seg_context[0]) * mi_height);
 }
-static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
-                     TOKENEXTRA **tp, int mi_row, int mi_col,
+static void encode_b(const AV1_COMP *const cpi, const TileInfo *const tile,
+                     ThreadData *td, TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PICK_MODE_CONTEXT *ctx) {
   MACROBLOCK *const x = &td->mb;
@@ -1417,10 +1422,11 @@ static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
   }
 }
-static void encode_sb(AV1_COMP *cpi, ThreadData *td, const TileInfo *const tile,
-                      TOKENEXTRA **tp, int mi_row, int mi_col,
-                      int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) {
-  AV1_COMMON *const cm = &cpi->common;
+static void encode_sb(const AV1_COMP *const cpi, ThreadData *td,
+                      const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
+                      int mi_col, int output_enabled, BLOCK_SIZE bsize,
+                      PC_TREE *pc_tree) {
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1940,26 +1946,25 @@ static void rd_auto_partition_range(AV1_COMP *cpi, const TileInfo *const tile,
 }
 // TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(const AV1_COMMON *const cm,
+                                const MACROBLOCKD *const xd, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize,
-                                BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
-  int mi_width = num_8x8_blocks_wide_lookup[bsize];
-  int mi_height = num_8x8_blocks_high_lookup[bsize];
+                                BLOCK_SIZE *const min_bs,
+                                BLOCK_SIZE *const max_bs) {
+  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
+  const int mi_height = num_8x8_blocks_high_lookup[bsize];
   int idx, idy;
-  MODE_INFO *mi;
   const int idx_str = cm->mi_stride * mi_row + mi_col;
-  MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
-  BLOCK_SIZE bs, min_size, max_size;
-  min_size = BLOCK_64X64;
-  max_size = BLOCK_4X4;
+  MODE_INFO **const prev_mi = &cm->prev_mi_grid_visible[idx_str];
+  BLOCK_SIZE min_size = BLOCK_64X64;  // default values
+  BLOCK_SIZE max_size = BLOCK_4X4;
   if (prev_mi) {
     for (idy = 0; idy < mi_height; ++idy) {
       for (idx = 0; idx < mi_width; ++idx) {
-        mi = prev_mi[idy * cm->mi_stride + idx];
-        bs = mi ? mi->mbmi.sb_type : bsize;
+        const MODE_INFO *const mi = prev_mi[idy * cm->mi_stride + idx];
+        const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
         min_size = AOMMIN(min_size, bs);
         max_size = AOMMAX(max_size, bs);
       }
@@ -1968,8 +1973,8 @@ static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
   if (xd->left_available) {
     for (idy = 0; idy < mi_height; ++idy) {
-      mi = xd->mi[idy * cm->mi_stride - 1];
-      bs = mi ? mi->mbmi.sb_type : bsize;
+      const MODE_INFO *const mi = xd->mi[idy * cm->mi_stride - 1];
+      const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
       min_size = AOMMIN(min_size, bs);
       max_size = AOMMAX(max_size, bs);
     }
@@ -1977,8 +1982,8 @@ static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
   if (xd->up_available) {
     for (idx = 0; idx < mi_width; ++idx) {
-      mi = xd->mi[idx - cm->mi_stride];
-      bs = mi ? mi->mbmi.sb_type : bsize;
+      const MODE_INFO *const mi = xd->mi[idx - cm->mi_stride];
+      const BLOCK_SIZE bs = mi ? mi->mbmi.sb_type : bsize;
       min_size = AOMMIN(min_size, bs);
       max_size = AOMMAX(max_size, bs);
     }
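All three scans in set_partition_range fold neighbor block sizes into a running [min_size, max_size] interval, falling back to bsize where a neighbor is missing; each loop body is equivalent to this little fold (int stands in for the codec's BLOCK_SIZE enum):

static void fold_block_size_sketch(int bs, int *min_bs, int *max_bs) {
  if (bs < *min_bs) *min_bs = bs;  /* AOMMIN */
  if (bs > *max_bs) *max_bs = bs;  /* AOMMAX */
}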
@@ -2052,25 +2057,25 @@ static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
 // unlikely to be selected depending on previous rate-distortion optimization
 // results, for encoding speed-up.
-static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td,
                               TileDataEnc *tile_data, TOKENEXTRA **tp,
                               int mi_row, int mi_col, BLOCK_SIZE bsize,
                               RD_COST *rd_cost, int64_t best_rd,
                               PC_TREE *pc_tree) {
-  AV1_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
-  MACROBLOCKD *const xd = &x->e_mbd;
+  const MACROBLOCKD *const xd = &x->e_mbd;
   const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
   ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
   PARTITION_CONTEXT sl[8], sa[8];
-  TOKENEXTRA *tp_orig = *tp;
+  const TOKENEXTRA *const tp_orig = *tp;
   PICK_MODE_CONTEXT *ctx = &pc_tree->none;
   int i, pl;
   BLOCK_SIZE subsize;
   RD_COST this_rdc, sum_rdc, best_rdc;
-  int do_split = bsize >= BLOCK_8X8;
-  int do_rect = 1;
+  const int bsize_at_least_8x8 = (bsize >= BLOCK_8X8);
+  int do_square_split = bsize_at_least_8x8;
+  int do_rectangular_split = 1;
   // Override skipping rectangular partition operations for edge blocks
   const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
@@ -2088,9 +2093,9 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
   int partition_none_allowed = !force_horz_split && !force_vert_split;
   int partition_horz_allowed =
-      !force_vert_split && yss <= xss && bsize >= BLOCK_8X8;
+      !force_vert_split && yss <= xss && bsize_at_least_8x8;
   int partition_vert_allowed =
-      !force_horz_split && xss <= yss && bsize >= BLOCK_8X8;
+      !force_horz_split && xss <= yss && bsize_at_least_8x8;
   (void)*tp_orig;
   assert(num_8x8_blocks_wide_lookup[bsize] ==
@@ -2107,7 +2112,7 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
     x->mb_energy = av1_block_energy(cpi, x, bsize);
   if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
-    int cb_partition_search_ctrl =
+    const int cb_partition_search_ctrl =
         ((pc_tree->index == 0 || pc_tree->index == 3) +
          get_chessboard_index(cm->current_video_frame)) &
         0x1;
@@ -2119,12 +2124,13 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
   // Determine partition types in search according to the speed features.
   // The threshold set here has to be of square block size.
   if (cpi->sf.auto_min_max_partition_size) {
-    partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
-    partition_horz_allowed &=
-        ((bsize <= max_size && bsize > min_size) || force_horz_split);
-    partition_vert_allowed &=
-        ((bsize <= max_size && bsize > min_size) || force_vert_split);
-    do_split &= bsize > min_size;
+    const int no_partition_allowed = (bsize <= max_size && bsize >= min_size);
+    // Note: Further partitioning is NOT allowed when bsize == min_size already.
+    const int partition_allowed = (bsize <= max_size && bsize > min_size);
+    partition_none_allowed &= no_partition_allowed;
+    partition_horz_allowed &= partition_allowed || force_horz_split;
+    partition_vert_allowed &= partition_allowed || force_vert_split;
+    do_square_split &= bsize > min_size;
   }
   if (cpi->sf.use_square_partition_only) {
     partition_horz_allowed &= force_horz_split;
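Summarizing the gating above: NONE is allowed only while bsize sits inside [min_size, max_size], splitting is allowed only while bsize can still shrink, and rectangular partitions survive at frame edges even outside the range because the edge forces them. A condensed, illustration-only restatement of that logic:

static void gate_partitions_sketch(int bsize, int min_size, int max_size,
                                   int force_horz, int force_vert, int *none,
                                   int *horz, int *vert, int *square_split) {
  const int size_in_range = (bsize <= max_size && bsize >= min_size);
  const int can_split_further = (bsize <= max_size && bsize > min_size);
  *none &= size_in_range;
  *horz &= can_split_further || force_horz;
  *vert &= can_split_further || force_vert;
  *square_split &= (bsize > min_size);
}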
@@ -2144,7 +2150,7 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
 #if CONFIG_FP_MB_STATS
   // Decide whether we shall split directly and skip searching NONE by using
   // the first pass block statistics
-  if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
+  if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_square_split &&
       partition_none_allowed && src_diff_var > 4 &&
       cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
     int mb_row = mi_row >> 1;
@@ -2197,25 +2203,25 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
     rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx,
                      best_rdc.rdcost);
     if (this_rdc.rate != INT_MAX) {
-      if (bsize >= BLOCK_8X8) {
-        pl = partition_plane_context(xd, mi_row, mi_col, bsize);
-        this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
+      if (bsize_at_least_8x8) {
+        const int partition_context =
+            partition_plane_context(xd, mi_row, mi_col, bsize);
+        this_rdc.rate += cpi->partition_cost[partition_context][PARTITION_NONE];
         this_rdc.rdcost =
             RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);
       }
       if (this_rdc.rdcost < best_rdc.rdcost) {
-        int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
-        int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
-        best_rdc = this_rdc;
-        if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE;
         // Adjust dist breakout threshold according to the partition size.
-        dist_breakout_thr >>=
-            8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);
-        rate_breakout_thr *= num_pels_log2_lookup[bsize];
+        const int64_t dist_breakout_thr =
+            cpi->sf.partition_search_breakout_dist_thr >>
+            (8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]));
+        const int rate_breakout_thr =
+            cpi->sf.partition_search_breakout_rate_thr *
+            num_pels_log2_lookup[bsize];
+        best_rdc = this_rdc;
+        if (bsize_at_least_8x8) pc_tree->partitioning = PARTITION_NONE;
         // If all y, u, v transform blocks in this partition are skippable, and
         // the dist & rate are within the thresholds, the partition search is
@@ -2225,8 +2231,8 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
       if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] &&
           (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
            best_rdc.rate < rate_breakout_thr)) {
-        do_split = 0;
-        do_rect = 0;
+        do_square_split = 0;
+        do_rectangular_split = 0;
       }
 #if CONFIG_FP_MB_STATS
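The comparisons above hinge on RDCOST, the fixed-point Lagrangian J = lambda * rate + distortion used throughout the partition search. In libvpx-lineage code it is conventionally defined along these lines; the exact shift counts are an assumption, not read from this page:

static int64_t rdcost_sketch(int rdmult, int rddiv, int rate, int64_t dist) {
  /* lambda * rate in Q8 fixed point, plus distortion scaled by rddiv bits. */
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}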
@@ -2235,7 +2241,7 @@ static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
       // If that is the case, check the difference variance between the
       // current frame and the last frame. If the variance is small enough,
       // stop further splitting in RD optimization
-      if (cpi->use_fp_mb_stats && do_split != 0 &&