Commit 368fbc95 authored by Urvang Joshi

Fix warnings reported by -Wshadow: Part2b: more from av1 directory

These fixes are in code that is only part of nextgenv2 (and not of aomedia).

Change-Id: I21f7478a59d525dff23747efe5238ded16b743d2
parent 454280da
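For context, the warning being silenced throughout this commit is variable shadowing: an inner declaration reuses a name that is already visible in an enclosing scope, which gcc and clang report under -Wshadow. The following minimal, hypothetical illustration (names invented, not taken from the diff) shows the warning and the fix styles used in the hunks below.

/* Hypothetical example: the inner declaration of 'j' shadows the outer one,
 * which -Wshadow reports. */
static int sum_all(const int m[4][4]) {
  int i, j;
  int total = 0;
  for (j = 0; j < 4; j++) total += m[0][j]; /* the outer j really is used */
  for (i = 1; i < 4; i++) {
    int j; /* -Wshadow: declaration of 'j' shadows a previous local */
    for (j = 0; j < 4; j++) total += m[i][j];
  }
  return total;
}
/* The hunks below silence such warnings in one of three ways:
 *  - delete the redundant inner declaration and reuse the outer variable,
 *  - rename one of the two (pred -> preds, ctx -> ctx_0, quant -> y_quant),
 *  - or declare the variable only in the innermost scope that needs it. */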
@@ -871,7 +871,6 @@ static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
index_sz = 2 + (mag + 1) * (ctx->pending_frame_count - 1);
if (ctx->pending_cx_data_sz + index_sz < ctx->cx_data_sz) {
uint8_t *x = ctx->pending_cx_data + ctx->pending_cx_data_sz;
int i, j;
#ifdef TEST_SUPPLEMENTAL_SUPERFRAME_DATA
uint8_t marker_test = 0xc0;
int mag_test = 2; // 1 - 4
@@ -890,6 +889,7 @@ static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
*x++ = marker;
for (i = 0; i < ctx->pending_frame_count - 1; i++) {
unsigned int this_sz;
int j;
assert(ctx->pending_frame_sizes[i] > 0);
this_sz = (unsigned int)ctx->pending_frame_sizes[i] - 1;
...
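Both hunks above touch write_superframe_index(), which emits a VP9/AV1-style superframe index; the index_sz line gives its size, two marker bytes plus (mag + 1) size bytes per frame except the last. The standalone sketch below illustrates that layout based on the hunk and the usual superframe format; it is not the library's exact writer, and sketch_write_superframe_index and its parameters are invented names.

#include <stddef.h>
#include <stdint.h>

/* Sketch: marker byte, then (mag + 1) little-endian bytes holding
 * (frame size - 1) for every frame except the last, then the marker again.
 * Bytes written: 2 + (mag + 1) * (frame_count - 1), matching index_sz. */
static size_t sketch_write_superframe_index(uint8_t *x, const size_t *sizes,
                                            int frame_count, int mag,
                                            uint8_t marker) {
  uint8_t *const start = x;
  int i;
  *x++ = marker;
  for (i = 0; i < frame_count - 1; i++) {
    unsigned int this_sz = (unsigned int)sizes[i] - 1;
    int j; /* declared in the innermost scope, as the fix above does */
    for (j = 0; j <= mag; j++) {
      *x++ = (uint8_t)(this_sz & 0xff);
      this_sz >>= 8;
    }
  }
  *x++ = marker;
  return (size_t)(x - start);
}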
@@ -1483,7 +1483,6 @@ void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
#if CONFIG_SUPERTX
for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) {
int j;
for (j = 1; j < TX_SIZES; ++j) {
fc->supertx_prob[i][j] = av1_mode_mv_merge_probs(
pre_fc->supertx_prob[i][j], counts->supertx[i][j]);
@@ -1577,7 +1576,6 @@ void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
}
for (s = 1; s < EXT_TX_SETS_INTRA; ++s) {
if (use_intra_ext_tx_for_txsize[s][i]) {
int j;
for (j = 0; j < INTRA_MODES; ++j)
aom_tree_merge_probs(
av1_ext_tx_intra_tree[s], pre_fc->intra_ext_tx_prob[s][i][j],
...
@@ -702,7 +702,7 @@ static void filter_intra_predictors_4tap(uint8_t *dst, ptrdiff_t stride, int bs,
const uint8_t *above,
const uint8_t *left, int mode) {
int k, r, c;
int pred[33][65];
int preds[33][65];
int mean, ipred;
const TX_SIZE tx_size =
(bs == 32) ? TX_32X32
@@ -721,20 +721,20 @@ static void filter_intra_predictors_4tap(uint8_t *dst, ptrdiff_t stride, int bs,
}
mean = (mean + bs) / (2 * bs);
for (r = 0; r < bs; ++r) pred[r + 1][0] = (int)left[r] - mean;
for (r = 0; r < bs; ++r) preds[r + 1][0] = (int)left[r] - mean;
for (c = 0; c < 2 * bs + 1; ++c) pred[0][c] = (int)above[c - 1] - mean;
for (c = 0; c < 2 * bs + 1; ++c) preds[0][c] = (int)above[c - 1] - mean;
for (r = 1; r < bs + 1; ++r)
for (c = 1; c < 2 * bs + 1 - r; ++c) {
ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
ipred = c0 * preds[r - 1][c] + c1 * preds[r][c - 1] +
c2 * preds[r - 1][c - 1] + c3 * preds[r - 1][c + 1];
preds[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
}
for (r = 0; r < bs; ++r) {
for (c = 0; c < bs; ++c) {
ipred = pred[r + 1][c + 1] + mean;
ipred = preds[r + 1][c + 1] + mean;
dst[c] = clip_pixel(ipred);
}
dst += stride;
@@ -997,7 +997,7 @@ static void highbd_filter_intra_predictors_4tap(uint16_t *dst, ptrdiff_t stride,
const uint16_t *left, int mode,
int bd) {
int k, r, c;
int pred[33][65];
int preds[33][65];
int mean, ipred;
const TX_SIZE tx_size =
(bs == 32) ? TX_32X32
@@ -1016,20 +1016,20 @@ static void highbd_filter_intra_predictors_4tap(uint16_t *dst, ptrdiff_t stride,
}
mean = (mean + bs) / (2 * bs);
for (r = 0; r < bs; ++r) pred[r + 1][0] = (int)left[r] - mean;
for (r = 0; r < bs; ++r) preds[r + 1][0] = (int)left[r] - mean;
for (c = 0; c < 2 * bs + 1; ++c) pred[0][c] = (int)above[c - 1] - mean;
for (c = 0; c < 2 * bs + 1; ++c) preds[0][c] = (int)above[c - 1] - mean;
for (r = 1; r < bs + 1; ++r)
for (c = 1; c < 2 * bs + 1 - r; ++c) {
ipred = c0 * pred[r - 1][c] + c1 * pred[r][c - 1] +
c2 * pred[r - 1][c - 1] + c3 * pred[r - 1][c + 1];
pred[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
ipred = c0 * preds[r - 1][c] + c1 * preds[r][c - 1] +
c2 * preds[r - 1][c - 1] + c3 * preds[r - 1][c + 1];
preds[r][c] = ROUND_POWER_OF_TWO_SIGNED(ipred, FILTER_INTRA_PREC_BITS);
}
for (r = 0; r < bs; ++r) {
for (c = 0; c < bs; ++c) {
ipred = pred[r + 1][c + 1] + mean;
ipred = preds[r + 1][c + 1] + mean;
dst[c] = clip_pixel_highbd(ipred, bd);
}
dst += stride;
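The pred -> preds rename in the two predictors above is purely mechanical; the arithmetic is unchanged: every sample of the 33x65 working array is a 4-tap weighted sum of its above, left, above-left and above-right neighbours, rounded back by FILTER_INTRA_PREC_BITS. A minimal sketch of that recurrence for one sample follows; the taps c0..c3 stand in for the library's per-mode constants, and the rounding mirrors what ROUND_POWER_OF_TWO_SIGNED is understood to do (round the magnitude, keep the sign).

/* Illustrative helper, not part of the library. */
static int filter_intra_sample(int above, int left, int above_left,
                               int above_right, int c0, int c1, int c2, int c3,
                               int prec_bits) {
  const int ipred =
      c0 * above + c1 * left + c2 * above_left + c3 * above_right;
  const int offset = 1 << (prec_bits - 1);
  return ipred >= 0 ? (ipred + offset) >> prec_bits
                    : -((-ipred + offset) >> prec_bits);
}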
@@ -1188,8 +1188,6 @@ static void build_intra_predictors_high(
}
if (ext_intra_mode_info->use_ext_intra_mode[plane != 0]) {
EXT_INTRA_MODE ext_intra_mode =
ext_intra_mode_info->ext_intra_mode[plane != 0];
need_left = ext_intra_extend_modes[ext_intra_mode] & NEED_LEFT;
need_above = ext_intra_extend_modes[ext_intra_mode] & NEED_ABOVE;
}
@@ -1202,7 +1200,6 @@ static void build_intra_predictors_high(
assert(n_bottomleft_px >= 0);
if ((!need_above && n_left_px == 0) || (!need_left && n_top_px == 0)) {
int i;
const int val = (n_left_px == 0) ? base + 1 : base - 1;
for (i = 0; i < bs; ++i) {
aom_memset16(dst, val, bs);
@@ -1351,8 +1348,6 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
}
if (ext_intra_mode_info->use_ext_intra_mode[plane != 0]) {
EXT_INTRA_MODE ext_intra_mode =
ext_intra_mode_info->ext_intra_mode[plane != 0];
need_left = ext_intra_extend_modes[ext_intra_mode] & NEED_LEFT;
need_above = ext_intra_extend_modes[ext_intra_mode] & NEED_ABOVE;
}
@@ -1373,7 +1368,6 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
assert(n_bottomleft_px >= 0);
if ((!need_above && n_left_px == 0) || (!need_left && n_top_px == 0)) {
int i;
const int val = (n_left_px == 0) ? 129 : 127;
for (i = 0; i < bs; ++i) {
memset(dst, val, bs);
...
@@ -1308,8 +1308,8 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const BLOCK_SIZE plane_bsize =
get_plane_block_size(AOMMAX(bsize, BLOCK_8X8), pd);
const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
int bw = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
int bh = num_4x4_blocks_high_txsize_lookup[max_tx_size];
const int bw_var_tx = num_4x4_blocks_wide_txsize_lookup[max_tx_size];
const int bh_var_tx = num_4x4_blocks_high_txsize_lookup[max_tx_size];
const int step = num_4x4_blocks_txsize_lookup[max_tx_size];
int block = 0;
#if CONFIG_EXT_TX && CONFIG_RECT_TX
@@ -1333,8 +1333,8 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
plane, row, col, tx_size);
} else {
#endif
for (row = 0; row < num_4x4_h; row += bh) {
for (col = 0; col < num_4x4_w; col += bw) {
for (row = 0; row < num_4x4_h; row += bh_var_tx) {
for (col = 0; col < num_4x4_w; col += bw_var_tx) {
decode_reconstruct_tx(xd, r, mbmi, plane, plane_bsize, block, row,
col, max_tx_size, &eobtotal);
block += step;
...
@@ -1697,9 +1697,9 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
#endif
const TX_SIZE max_tx_size = max_txsize_lookup[plane_bsize];
const BLOCK_SIZE txb_size = txsize_to_bsize[max_tx_size];
int bw = num_4x4_blocks_wide_lookup[txb_size];
int block = 0;
const int step = num_4x4_blocks_txsize_lookup[max_tx_size];
bw = num_4x4_blocks_wide_lookup[txb_size];
for (row = 0; row < num_4x4_h; row += bw) {
for (col = 0; col < num_4x4_w; col += bw) {
pack_txb_tokens(w, tok, tok_end, xd, mbmi, plane, plane_bsize,
@@ -1711,8 +1711,8 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
: m->mbmi.tx_size;
BLOCK_SIZE txb_size = txsize_to_bsize[tx];
int bw = num_4x4_blocks_wide_lookup[txb_size];
int bh = num_4x4_blocks_high_lookup[txb_size];
bw = num_4x4_blocks_wide_lookup[txb_size];
bh = num_4x4_blocks_high_lookup[txb_size];
for (row = 0; row < num_4x4_h; row += bh)
for (col = 0; col < num_4x4_w; col += bw)
@@ -2295,7 +2295,6 @@ static void update_coef_probs_subframe(
for (t = 0; t < entropy_nodes_update; ++t) {
aom_prob newp = new_coef_probs[i][j][k][l][t];
aom_prob *oldp = old_coef_probs[i][j][k][l] + t;
const aom_prob upd = DIFF_UPDATE_PROB;
int s;
int u = 0;
@@ -2418,8 +2417,6 @@ static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
#if CONFIG_ENTROPY
if (cm->do_subframe_update &&
cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
unsigned int eob_counts_copy[PLANE_TYPES][REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS];
av1_coeff_count coef_counts_copy[PLANE_TYPES];
av1_copy(eob_counts_copy, cpi->common.counts.eob_branch[tx_size]);
av1_copy(coef_counts_copy, cpi->td.rd_counts.coef_counts[tx_size]);
...
@@ -2705,8 +2705,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
#if CONFIG_SUPERTX
int rt_nocoef = 0;
#endif
RD_SEARCH_MACROBLOCK_CONTEXT x_ctx;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
@@ -3140,12 +3138,12 @@ static void rd_test_partition3(
#else
if (sum_rdc.rdcost < best_rdc->rdcost) {
#endif
PICK_MODE_CONTEXT *ctx = &ctxs[0];
update_state(cpi, td, ctx, mi_row0, mi_col0, subsize0, 1);
PICK_MODE_CONTEXT *ctx_0 = &ctxs[0];
update_state(cpi, td, ctx_0, mi_row0, mi_col0, subsize0, 1);
encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row0, mi_col0, subsize0,
ctx, NULL);
ctx_0, NULL);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx_0);
#if CONFIG_SUPERTX
rd_pick_sb_modes(cpi, tile_data, x, mi_row1, mi_col1, &this_rdc,
@@ -3181,12 +3179,12 @@ static void rd_test_partition3(
#else
if (sum_rdc.rdcost < best_rdc->rdcost) {
#endif
PICK_MODE_CONTEXT *ctx = &ctxs[1];
update_state(cpi, td, ctx, mi_row1, mi_col1, subsize1, 1);
PICK_MODE_CONTEXT *ctx_1 = &ctxs[1];
update_state(cpi, td, ctx_1, mi_row1, mi_col1, subsize1, 1);
encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row1, mi_col1, subsize1,
ctx, NULL);
ctx_1, NULL);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx_1);
#if CONFIG_SUPERTX
rd_pick_sb_modes(cpi, tile_data, x, mi_row2, mi_col2, &this_rdc,
...
@@ -110,7 +110,6 @@ static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv, int mb_row,
// If the current best reference mv is not centered on 0,0 then do a 0,0
// based search as well.
if (ref_mv->row != 0 || ref_mv->col != 0) {
unsigned int tmp_err;
MV zero_ref_mv = { 0, 0 };
tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, mb_row, mb_col);
...
@@ -351,7 +351,6 @@ static void update_a_sep_sym(double **Mc, double **Hc, double *a, double *b) {
memset(A, 0, sizeof(A));
memset(B, 0, sizeof(B));
for (i = 0; i < RESTORATION_WIN; i++) {
int j;
for (j = 0; j < RESTORATION_WIN; ++j) {
const int jj = wrap_index(j);
A[jj] += Mc[i][j] * b[i];
@@ -399,7 +398,6 @@ static void update_b_sep_sym(double **Mc, double **Hc, double *a, double *b) {
memset(A, 0, sizeof(A));
memset(B, 0, sizeof(B));
for (i = 0; i < RESTORATION_WIN; i++) {
int j;
const int ii = wrap_index(i);
for (j = 0; j < RESTORATION_WIN; j++) A[ii] += Mc[i][j] * a[j];
}
...
@@ -1115,9 +1115,9 @@ void av1_init_quantizer(AV1_COMP *cpi) {
#if CONFIG_NEW_QUANT
for (dq = 0; dq < QUANT_PROFILES; dq++) {
for (i = 0; i < COEF_BANDS; i++) {
const int quant = cpi->y_dequant[q][i != 0];
const int y_quant = cpi->y_dequant[q][i != 0];
const int uvquant = cpi->uv_dequant[q][i != 0];
av1_get_dequant_val_nuq(quant, i, cpi->y_dequant_val_nuq[dq][q][i],
av1_get_dequant_val_nuq(y_quant, i, cpi->y_dequant_val_nuq[dq][q][i],
quants->y_cuml_bins_nuq[dq][q][i], dq);
av1_get_dequant_val_nuq(uvquant, i, cpi->uv_dequant_val_nuq[dq][q][i],
quants->uv_cuml_bins_nuq[dq][q][i], dq);
...
@@ -613,8 +613,7 @@ void tokenize_vartx(ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run,
: mbmi->inter_tx_size[tx_row][tx_col];
if (tx_size == plane_tx_size) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
if (!dry_run)
tokenize_b(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
else if (dry_run == DRY_RUN_NORMAL)
...
@@ -31,12 +31,12 @@ typedef struct {
int64_t sum_error;
int log2_count;
int variance;
} var;
} VAR;
typedef struct {
var none;
var horz[2];
var vert[2];
VAR none;
VAR horz[2];
VAR vert[2];
} partition_variance;
typedef struct VAR_TREE {
@@ -59,7 +59,7 @@ void av1_setup_var_tree(struct AV1Common *cm, struct ThreadData *td);
void av1_free_var_tree(struct ThreadData *td);
// Set variance values given sum square error, sum error, count.
static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
static INLINE void fill_variance(int64_t s2, int64_t s, int c, VAR *v) {
v->sum_square_error = s2;
v->sum_error = s;
v->log2_count = c;
@@ -69,7 +69,7 @@ static INLINE void fill_variance(int64_t s2, int64_t s, int c, var *v) {
v->log2_count);
}
static INLINE void sum_2_variances(const var *a, const var *b, var *r) {
static INLINE void sum_2_variances(const VAR *a, const VAR *b, VAR *r) {
assert(a->log2_count == b->log2_count);
fill_variance(a->sum_square_error + b->sum_square_error,
a->sum_error + b->sum_error, a->log2_count + 1, r);
...
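The final hunks rename the lower-case typedef var to VAR, presumably so that ordinary locals elsewhere can keep the name var without shadowing the file-scope type name, which -Wshadow also reports. Below is a self-contained sketch of how the renamed type is filled and merged, following the fields and signatures shown above; the 256-scaled variance expression is the standard sum-of-squares identity and is an assumption here, not a quote of the header.

#include <assert.h>
#include <stdint.h>

typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count; /* log2 of the number of samples */
  int variance;
} VAR;

/* variance ~ 256 * (E[x^2] - E[x]^2), with the sample count kept as a log2. */
static void fill_variance(int64_t s2, int64_t s, int c, VAR *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
  v->variance = (int)(256 * (s2 - ((s * s) >> c)) >> c);
}

/* Merging two equal-sized partitions adds the sums and bumps the log2 count,
 * exactly as the sum_2_variances body in the hunk above does. */
static void sum_2_variances(const VAR *a, const VAR *b, VAR *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}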