Commit 52648448 authored by Urvang Joshi's avatar Urvang Joshi

Code cleanup: mainly rd_pick_partition and methods called from there.

- Const correctness
- Refactoring
- Make variables local when possible etc
- Remove -Wcast-qual to allow explicitly casting away const.

Cherry-picked from aomedia/master: c27fcccc
Followed by a number of additional const-correctness changes to make sure
other experiments build OK.

Change-Id: I77c18d99d21218fbdc9b186d7ed3792dc401a0a0
parent 40f1d487
......@@ -357,7 +357,7 @@ typedef struct macroblockd {
FRAME_CONTEXT *fc;
/* pointers to reference frames */
RefBuffer *block_refs[2];
const RefBuffer *block_refs[2];
/* pointer to current frame */
const YV12_BUFFER_CONFIG *cur_buf;
......
......@@ -763,8 +763,8 @@ void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
*near_mv = mvlist[1];
}
void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
int ref, int mi_row, int mi_col,
void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
int block, int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
CANDIDATE_MV *ref_mv_stack,
uint8_t *ref_mv_count,
......
......@@ -465,8 +465,8 @@ void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
int_mv *near_mv);
void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
int ref, int mi_row, int mi_col,
void av1_append_sub8x8_mvs_for_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
int block, int ref, int mi_row, int mi_col,
#if CONFIG_REF_MV
CANDIDATE_MV *ref_mv_stack,
uint8_t *ref_mv_count,
......
......@@ -20,7 +20,7 @@
extern "C" {
#endif
static INLINE int get_segment_id(const AV1_COMMON *cm,
static INLINE int get_segment_id(const AV1_COMMON *const cm,
const uint8_t *segment_ids, BLOCK_SIZE bsize,
int mi_row, int mi_col) {
const int mi_offset = mi_row * cm->mi_cols + mi_col;
......
......@@ -1170,7 +1170,7 @@ const uint8_t *av1_get_obmc_mask(int length) {
// top/left neighboring blocks' inter predictors with the regular inter
// prediction. We assume the original prediction (bmc) is stored in
// xd->plane[].dst.buf
void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *above[MAX_MB_PLANE],
int above_stride[MAX_MB_PLANE],
......@@ -1281,7 +1281,7 @@ void modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi) {
}
#endif // CONFIG_EXT_INTER
void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
......@@ -1319,8 +1319,8 @@ void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
pd->subsampling_y);
}
for (ref = 0; ref < 1 + has_second_ref(above_mbmi); ++ref) {
MV_REFERENCE_FRAME frame = above_mbmi->ref_frame[ref];
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
const MV_REFERENCE_FRAME frame = above_mbmi->ref_frame[ref];
const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!av1_is_valid_scale(&ref_buf->sf)))
......@@ -1378,7 +1378,7 @@ void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8);
}
void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
......@@ -1416,8 +1416,8 @@ void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
pd->subsampling_y);
}
for (ref = 0; ref < 1 + has_second_ref(left_mbmi); ++ref) {
MV_REFERENCE_FRAME frame = left_mbmi->ref_frame[ref];
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
const MV_REFERENCE_FRAME frame = left_mbmi->ref_frame[ref];
const RefBuffer *const ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!av1_is_valid_scale(&ref_buf->sf)))
......
......@@ -517,19 +517,19 @@ static INLINE int av1_is_interp_needed(const MACROBLOCKD *const xd) {
#if CONFIG_MOTION_VAR
const uint8_t *av1_get_obmc_mask(int length);
void av1_build_obmc_inter_prediction(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *above[MAX_MB_PLANE],
int above_stride[MAX_MB_PLANE],
uint8_t *left[MAX_MB_PLANE],
int left_stride[MAX_MB_PLANE]);
void av1_build_prediction_by_above_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_prediction_by_above_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
int tmp_height[MAX_MB_PLANE],
int tmp_stride[MAX_MB_PLANE]);
void av1_build_prediction_by_left_preds(AV1_COMMON *cm, MACROBLOCKD *xd,
void av1_build_prediction_by_left_preds(const AV1_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
uint8_t *tmp_buf[MAX_MB_PLANE],
int tmp_width[MAX_MB_PLANE],
......
......@@ -111,9 +111,9 @@ void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
// Select a segment for the current block.
// The choice of segment for a block depends on the ratio of the projected
// bits for the block vs a target average and its spatial complexity.
void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
void av1_caq_select_segment(const AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
int mi_row, int mi_col, int projected_rate) {
AV1_COMMON *const cm = &cpi->common;
const AV1_COMMON *const cm = &cpi->common;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
const int xmis = AOMMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]);
......
......@@ -22,7 +22,7 @@ struct AV1_COMP;
struct macroblock;
// Select a segment for the current Block.
void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
void av1_caq_select_segment(const struct AV1_COMP *cpi, struct macroblock *,
BLOCK_SIZE bs, int mi_row, int mi_col,
int projected_rate);
......
......@@ -209,7 +209,7 @@ int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
void av1_cyclic_refresh_update_segment(const AV1_COMP *cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip) {
......
......@@ -49,7 +49,7 @@ int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
void av1_cyclic_refresh_update_segment(const struct AV1_COMP *cpi,
MB_MODE_INFO *const mbmi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip);
......
......@@ -141,7 +141,7 @@ static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
}
#endif // CONFIG_AOM_HIGHBITDEPTH
static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
static unsigned int block_variance(const AV1_COMP *const cpi, MACROBLOCK *x,
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
......@@ -189,14 +189,14 @@ static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
}
}
double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
unsigned int var = block_variance(cpi, x, bs);
aom_clear_system_state();
return log(var + 1.0);
}
#define DEFAULT_E_MIDPOINT 10.0
int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
double energy;
double energy_midpoint;
aom_clear_system_state();
......
......@@ -21,8 +21,8 @@ extern "C" {
unsigned int av1_vaq_segment_id(int energy);
void av1_vaq_frame_setup(AV1_COMP *cpi);
int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
int av1_block_energy(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
double av1_log_block_var(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
#ifdef __cplusplus
} // extern "C"
......
This diff is collapsed.
......@@ -710,7 +710,7 @@ static INLINE int get_ref_frame_map_idx(const AV1_COMP *cpi,
return cpi->alt_fb_idx;
}
static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
static INLINE int get_ref_frame_buf_idx(const AV1_COMP *cpi,
MV_REFERENCE_FRAME ref_frame) {
const AV1_COMMON *const cm = &cpi->common;
const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
......@@ -718,15 +718,15 @@ static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
}
static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
AV1_COMMON *const cm = &cpi->common;
const AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
const AV1_COMMON *const cm = &cpi->common;
const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
: NULL;
}
static INLINE const YV12_BUFFER_CONFIG *get_upsampled_ref(
AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
const AV1_COMP *cpi, const MV_REFERENCE_FRAME ref_frame) {
// Use up-sampled reference frames.
const int buf_idx =
cpi->upsampled_ref_idx[get_ref_frame_map_idx(cpi, ref_frame)];
......@@ -797,7 +797,7 @@ static INLINE int is_bwdref_enabled(const AV1_COMP *const cpi) {
}
#endif // CONFIG_EXT_REFS
static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
static INLINE void set_ref_ptrs(const AV1_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
MV_REFERENCE_FRAME ref1) {
xd->block_refs[0] =
......
......@@ -1805,9 +1805,9 @@ unsigned int av1_int_pro_motion_estimation(const AV1_COMP *cpi, MACROBLOCK *x,
/* do_refine: If last step (1-away) of n-step search doesn't pick the center
point as the best match, we will do a final 1-away diamond
refining search */
static int full_pixel_diamond(AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
int step_param, int sadpb, int further_steps,
int do_refine, int *cost_list,
static int full_pixel_diamond(const AV1_COMP *const cpi, MACROBLOCK *x,
MV *mvp_full, int step_param, int sadpb,
int further_steps, int do_refine, int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv) {
MV temp_mv;
......@@ -1870,7 +1870,7 @@ static int full_pixel_diamond(AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
#define MIN_INTERVAL 1
// Runs an limited range exhaustive mesh search using a pattern set
// according to the encode speed profile.
static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
static int full_pixel_exhaustive(const AV1_COMP *const cpi, MACROBLOCK *x,
const MV *centre_mv_full, int sadpb,
int *cost_list,
const aom_variance_fn_ptr_t *fn_ptr,
......@@ -2243,7 +2243,7 @@ int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
}
#define MIN_EX_SEARCH_LIMIT 128
static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
static int is_exhaustive_allowed(const AV1_COMP *const cpi, MACROBLOCK *x) {
const SPEED_FEATURES *const sf = &cpi->sf;
const int max_ex =
AOMMAX(MIN_EX_SEARCH_LIMIT,
......@@ -2254,13 +2254,13 @@ static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
(*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
}
int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
int av1_full_pixel_search(const AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
MV *mvp_full, int step_param, int error_per_bit,
int *cost_list, const MV *ref_mv, int var_max,
int rd) {
const SPEED_FEATURES *const sf = &cpi->sf;
const SEARCH_METHODS method = sf->mv.search_method;
aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
const aom_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize];
int var = 0;
if (cost_list) {
......@@ -2530,7 +2530,7 @@ static unsigned int upsampled_setup_masked_center_error(
}
int av1_find_best_masked_sub_pixel_tree_up(
AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
const AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
......@@ -3031,7 +3031,7 @@ static unsigned int upsampled_setup_obmc_center_error(
}
int av1_find_best_obmc_sub_pixel_tree_up(
AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
const AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
......
......@@ -114,10 +114,10 @@ int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
struct AV1_COMP;
int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
MV *mvp_full, int step_param, int error_per_bit,
int *cost_list, const MV *ref_mv, int var_max,
int rd);
int av1_full_pixel_search(const struct AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, MV *mvp_full, int step_param,
int error_per_bit, int *cost_list, const MV *ref_mv,
int var_max, int rd);
#if CONFIG_EXT_INTER
int av1_find_best_masked_sub_pixel_tree(
......@@ -127,11 +127,11 @@ int av1_find_best_masked_sub_pixel_tree(
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second);
int av1_find_best_masked_sub_pixel_tree_up(
struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask, int mask_stride,
int mi_row, int mi_col, MV *bestmv, const MV *ref_mv, int allow_hp,
int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
int iters_per_step, int *mvjcost, int *mvcost[2], int *distortion,
unsigned int *sse1, int is_second, int use_upsampled_ref);
const struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask,
int mask_stride, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
int allow_hp, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
int av1_masked_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
const uint8_t *mask, int mask_stride,
MV *mvp_full, int step_param, int sadpb,
......@@ -147,8 +147,8 @@ int av1_obmc_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv, int is_second);
int av1_find_best_obmc_sub_pixel_tree_up(
struct AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
const struct AV1_COMP *cpi, MACROBLOCK *x, int mi_row, int mi_col,
MV *bestmv, const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second, int use_upsampled_ref);
......
......@@ -619,7 +619,7 @@ void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
get_entropy_contexts_plane(plane_bsize, tx_size, pd, t_above, t_left);
}
void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
void av1_mv_pred(const AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
int i;
int zero_seen = 0;
......
......@@ -398,8 +398,7 @@ void av1_initialize_me_consts(const struct AV1_COMP *cpi, MACROBLOCK *x,
void av1_model_rd_from_var_lapndz(int64_t var, unsigned int n,
unsigned int qstep, int *rate, int64_t *dist);
int av1_get_switchable_rate(const struct AV1_COMP *cpi,
const MACROBLOCKD *const xd);
int av1_get_switchable_rate(const struct AV1_COMP *cpi, const MACROBLOCKD *xd);
int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
int stride);
......@@ -438,8 +437,9 @@ static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
}
void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
void av1_mv_pred(const struct AV1_COMP *cpi, MACROBLOCK *x,
uint8_t *ref_y_buffer, int ref_y_stride, int ref_frame,
BLOCK_SIZE block_size);
static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
x->errorperbit = rdmult >> RD_EPB_SHIFT;
......
This diff is collapsed.
......@@ -29,20 +29,20 @@ struct RD_COST;
int av1_cost_coeffs(MACROBLOCK *x, int plane, int block, int coeff_ctx,
TX_SIZE tx_size, const int16_t *scan, const int16_t *nb,
int use_fast_coef_costing);
void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
void av1_rd_pick_intra_mode_sb(const struct AV1_COMP *cpi, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd);
unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
unsigned int av1_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs);
#if CONFIG_AOM_HIGHBITDEPTH
unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs, int bd);
#endif
void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
void av1_rd_pick_inter_mode_sb(const struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row, int mi_col,
struct RD_COST *rd_cost,
......@@ -53,16 +53,16 @@ void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
int64_t best_rd_so_far);
void av1_rd_pick_inter_mode_sb_seg_skip(
struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd_so_far);
const struct AV1_COMP *cpi, struct TileDataEnc *tile_data,
struct macroblock *x, struct RD_COST *rd_cost, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
int av1_internal_image_edge(struct AV1_COMP *cpi);
int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
int av1_internal_image_edge(const struct AV1_COMP *cpi);
int av1_active_h_edge(const struct AV1_COMP *cpi, int mi_row, int mi_step);
int av1_active_v_edge(const struct AV1_COMP *cpi, int mi_col, int mi_step);
int av1_active_edge_sb(const struct AV1_COMP *cpi, int mi_row, int mi_col);
void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
struct TileDataEnc *tile_data,
struct macroblock *x, int mi_row, int mi_col,
struct RD_COST *rd_cost,
......
......@@ -344,7 +344,7 @@ const struct av1_token av1_coef_encodings[ENTROPY_TOKENS] = {
#endif // !CONFIG_ANS
struct tokenize_b_args {
AV1_COMP *cpi;
const AV1_COMP *cpi;
ThreadData *td;
TOKENEXTRA **tp;
int this_rate;
......@@ -409,7 +409,7 @@ static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
}
#if CONFIG_PALETTE
void av1_tokenize_palette_sb(AV1_COMP *cpi, struct ThreadData *const td,
void av1_tokenize_palette_sb(const AV1_COMP *cpi, struct ThreadData *const td,
int plane, TOKENEXTRA **t, RUN_TYPE dry_run,
BLOCK_SIZE bsize, int *rate) {
MACROBLOCK *const x = &td->mb;
......@@ -454,7 +454,7 @@ void av1_tokenize_palette_sb(AV1_COMP *cpi, struct ThreadData *const td,
static void tokenize_b(int plane, int block, int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
struct tokenize_b_args *const args = arg;
AV1_COMP *cpi = args->cpi;
const AV1_COMP *cpi = args->cpi;
ThreadData *const td = args->td;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
......@@ -481,7 +481,7 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col,
unsigned int(*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
td->rd_counts.coef_counts[txsize_sqr_map[tx_size]][type][ref];
#if CONFIG_ENTROPY
aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
cpi->subframe_stats.coef_probs_buf[cpi->common.coef_probs_update_idx]
[txsize_sqr_map[tx_size]][type][ref];
#else
......@@ -641,10 +641,10 @@ void tokenize_vartx(ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run,
}
}
void av1_tokenize_sb_vartx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
void av1_tokenize_sb_vartx(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
RUN_TYPE dry_run, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate) {
AV1_COMMON *const cm = &cpi->common;
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
......@@ -696,9 +696,9 @@ void av1_tokenize_sb_vartx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
}
#endif // CONFIG_VAR_TX
void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
void av1_tokenize_sb(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
RUN_TYPE dry_run, BLOCK_SIZE bsize, int *rate) {
AV1_COMMON *const cm = &cpi->common;
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &td->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
......@@ -732,9 +732,10 @@ void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
}
#if CONFIG_SUPERTX
void av1_tokenize_sb_supertx(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
RUN_TYPE dry_run, BLOCK_SIZE bsize, int *rate) {
AV1_COMMON *const cm = &cpi->common;
void av1_tokenize_sb_supertx(const AV1_COMP *cpi, ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate) {
const AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &td->mb.e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
TOKENEXTRA *t_backup = *t;
......
......@@ -66,20 +66,21 @@ typedef enum {
// with the coefficient token cost only if dry_run = DRY_RUN_COSTCOEFS,
// otherwise rate is not incremented.
#if CONFIG_VAR_TX
void av1_tokenize_sb_vartx(struct AV1_COMP *cpi, struct ThreadData *td,
void av1_tokenize_sb_vartx(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate);
#endif
#if CONFIG_PALETTE
void av1_tokenize_palette_sb(struct AV1_COMP *cpi, struct ThreadData *const td,
int plane, TOKENEXTRA **t, RUN_TYPE dry_run,
BLOCK_SIZE bsize, int *rate);
void av1_tokenize_palette_sb(const struct AV1_COMP *cpi,
struct ThreadData *const td, int plane,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
#endif // CONFIG_PALETTE
void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
void av1_tokenize_sb(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
#if CONFIG_SUPERTX
void av1_tokenize_sb_supertx(struct AV1_COMP *cpi, struct ThreadData *td,
void av1_tokenize_sb_supertx(const struct AV1_COMP *cpi, struct ThreadData *td,
TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize,
int *rate);
#endif
......
......@@ -606,7 +606,6 @@ process_toolchain() {
check_add_cflags -Wfloat-conversion
check_add_cflags -Wpointer-arith
check_add_cflags -Wtype-limits
check_add_cflags -Wcast-qual
check_add_cflags -Wvla
check_add_cflags -Wimplicit-function-declaration
check_add_cflags -Wuninitialized
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment