Commit edced25a authored by Debargha Mukherjee

Further cleanups related to removal of cb4x4 flags

Removes a bunch of unused code.

Change-Id: I86bda117d8d455452ee1ee3a2a566742650e05e0
parent c4ec0329
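For context on the hunks below: with cb4x4 unified, the `unify_bsize` flag being deleted was hard-coded to 1, so every branch guarded by `!unify_bsize` (the old sub-8x8 handling) was unreachable dead code, and the `bsize >= BLOCK_8X8 || unify_bsize` tests were always true. A minimal, self-contained sketch of that pattern, using hypothetical toy names rather than libaom identifiers:

```c
#include <stdio.h>

/* Toy block sizes standing in for libaom's BLOCK_SIZE enum. */
enum { TOY_BLOCK_4X4 = 0, TOY_BLOCK_8X8 = 3 };

/* Before the cleanup: the flag is a constant 1, so the else branch is dead. */
static int path_before(int bsize) {
  const int unify_bsize = 1;
  if (bsize >= TOY_BLOCK_8X8 || unify_bsize)
    return 1; /* whole-block path, always taken */
  else
    return 0; /* per-sub-block path, unreachable */
}

/* After the cleanup: the flag and the dead branch are simply gone. */
static int path_after(int bsize) {
  (void)bsize;
  return 1;
}

int main(void) {
  /* Both versions agree for every block size, including sub-8x8 ones. */
  printf("%d %d\n", path_before(TOY_BLOCK_4X4), path_after(TOY_BLOCK_4X4));
  return 0;
}
```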
......@@ -1632,18 +1632,9 @@ static INLINE int is_nontrans_global_motion(const MACROBLOCKD *xd) {
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
int ref;
const int unify_bsize = 1;
// First check if all modes are ZEROMV
if (mbmi->sb_type >= BLOCK_8X8 || unify_bsize) {
if (mbmi->mode != ZEROMV && mbmi->mode != ZERO_ZEROMV) return 0;
} else {
if ((mi->bmi[0].as_mode != ZEROMV && mi->bmi[0].as_mode != ZERO_ZEROMV) ||
(mi->bmi[1].as_mode != ZEROMV && mi->bmi[1].as_mode != ZERO_ZEROMV) ||
(mi->bmi[2].as_mode != ZEROMV && mi->bmi[2].as_mode != ZERO_ZEROMV) ||
(mi->bmi[3].as_mode != ZEROMV && mi->bmi[3].as_mode != ZERO_ZEROMV))
return 0;
}
if (mbmi->mode != ZEROMV && mbmi->mode != ZERO_ZEROMV) return 0;
#if !GLOBAL_SUB8X8_USED
if (mbmi->sb_type < BLOCK_8X8) return 0;
......
......@@ -244,10 +244,10 @@ static INLINE int_mv gm_get_motion_vector(const WarpedMotionParams *gm,
int is_integer
#endif
) {
const int unify_bsize = 1;
int_mv res;
const int32_t *mat = gm->wmmat;
int x, y, tx, ty;
(void)block_idx;
if (gm->wmtype == TRANSLATION) {
// All global motion vectors are stored with WARPEDMODEL_PREC_BITS (16)
......@@ -269,15 +269,8 @@ static INLINE int_mv gm_get_motion_vector(const WarpedMotionParams *gm,
return res;
}
if (bsize >= BLOCK_8X8 || unify_bsize) {
x = block_center_x(mi_col, bsize);
y = block_center_y(mi_row, bsize);
} else {
x = block_center_x(mi_col, bsize);
y = block_center_y(mi_row, bsize);
x += (block_idx & 1) * MI_SIZE / 2;
y += (block_idx & 2) * MI_SIZE / 4;
}
x = block_center_x(mi_col, bsize);
y = block_center_y(mi_row, bsize);
if (gm->wmtype == ROTZOOM) {
assert(gm->wmmat[5] == gm->wmmat[2]);
......
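The surviving `gm_get_motion_vector` path always evaluates the global model at the center of the whole block; the per-sub-block offsets are gone. A rough sketch of the center computation it relies on, with toy names (the formula is an assumption; the real `block_center_x`/`block_center_y` helpers may differ in rounding):

```c
#include <stdio.h>

/* Assumed 4x4 mode-info unit size, as in libaom's MI_SIZE. */
#define TOY_MI_SIZE 4

/* Hypothetical stand-ins: pixel coordinate of the (approximate) block
 * center, given the mi position and the block dimensions in pixels. */
static int toy_block_center_x(int mi_col, int bw_pixels) {
  return mi_col * TOY_MI_SIZE + bw_pixels / 2 - 1;
}
static int toy_block_center_y(int mi_row, int bh_pixels) {
  return mi_row * TOY_MI_SIZE + bh_pixels / 2 - 1;
}

int main(void) {
  /* An 8x8 block at mi (0, 0): center near pixel (3, 3) in this toy model. */
  printf("%d %d\n", toy_block_center_x(0, 8), toy_block_center_y(0, 8));
  return 0;
}
```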
......@@ -65,7 +65,6 @@ static uint8_t add_ref_mv_candidate(
int subsampling_y) {
int index = 0, ref;
int newmv_count = 0;
const int unify_bsize = 1;
assert(weight % 2 == 0);
#if !CONFIG_EXT_WARPED_MOTION
(void)bsize;
......@@ -146,33 +145,6 @@ static uint8_t add_ref_mv_candidate(
if (candidate->mode == NEWMV) ++newmv_count;
}
if (candidate_mi->mbmi.sb_type < BLOCK_8X8 && block >= 0 &&
!unify_bsize) {
int alt_block = 3 - block;
this_refmv = get_sub_block_mv(candidate_mi, ref, col, alt_block);
#if CONFIG_AMVR
lower_mv_precision(&this_refmv.as_mv, use_hp, is_integer);
#else
lower_mv_precision(&this_refmv.as_mv, use_hp);
#endif
for (index = 0; index < *refmv_count; ++index)
if (ref_mv_stack[index].this_mv.as_int == this_refmv.as_int) break;
if (index < *refmv_count) ref_mv_stack[index].weight += len;
// Add a new item to the list.
if (index == *refmv_count) {
ref_mv_stack[index].this_mv = this_refmv;
ref_mv_stack[index].pred_diff[0] = av1_get_pred_diff_ctx(
get_sub_block_pred_mv(candidate_mi, ref, col, alt_block),
this_refmv);
ref_mv_stack[index].weight = len;
++(*refmv_count);
if (candidate->mode == NEWMV) ++newmv_count;
}
}
}
}
} else {
......@@ -214,43 +186,6 @@ static uint8_t add_ref_mv_candidate(
if (candidate->mode == NEW_NEWMV) ++newmv_count;
}
if (candidate_mi->mbmi.sb_type < BLOCK_8X8 && block >= 0 &&
!unify_bsize) {
int alt_block = 3 - block;
this_refmv[0] = get_sub_block_mv(candidate_mi, 0, col, alt_block);
this_refmv[1] = get_sub_block_mv(candidate_mi, 1, col, alt_block);
for (ref = 0; ref < 2; ++ref) {
#if CONFIG_AMVR
lower_mv_precision(&this_refmv[ref].as_mv, use_hp, is_integer);
#else
lower_mv_precision(&this_refmv[ref].as_mv, use_hp);
#endif
}
for (index = 0; index < *refmv_count; ++index)
if (ref_mv_stack[index].this_mv.as_int == this_refmv[0].as_int &&
ref_mv_stack[index].comp_mv.as_int == this_refmv[1].as_int)
break;
if (index < *refmv_count) ref_mv_stack[index].weight += len;
// Add a new item to the list.
if (index == *refmv_count) {
ref_mv_stack[index].this_mv = this_refmv[0];
ref_mv_stack[index].comp_mv = this_refmv[1];
ref_mv_stack[index].pred_diff[0] = av1_get_pred_diff_ctx(
get_sub_block_pred_mv(candidate_mi, 0, col, block),
this_refmv[0]);
ref_mv_stack[index].pred_diff[0] = av1_get_pred_diff_ctx(
get_sub_block_pred_mv(candidate_mi, 1, col, block),
this_refmv[1]);
ref_mv_stack[index].weight = len;
++(*refmv_count);
if (candidate->mode == NEW_NEWMV) ++newmv_count;
}
}
}
}
return newmv_count;
......
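The deleted sub-8x8 blocks in `add_ref_mv_candidate` duplicated the same stack-insertion logic the whole-block path keeps: look the candidate MV up in the reference-MV stack, accumulate its weight on a hit, append a new entry on a miss. A simplified sketch of that surviving logic, with types and field names reduced for illustration:

```c
#include <stdint.h>

#define TOY_MAX_REF_MV_STACK 8

typedef struct {
  int32_t this_mv; /* packed MV, in the spirit of int_mv.as_int */
  int weight;
} ToyRefMvEntry;

/* Search the stack for this MV; bump its weight if present, else append.
 * The capacity check is added here for safety in the toy version. */
static void toy_add_ref_mv(ToyRefMvEntry *stack, uint8_t *refmv_count,
                           int32_t this_mv, int len) {
  int index;
  for (index = 0; index < *refmv_count; ++index)
    if (stack[index].this_mv == this_mv) break;

  if (index < *refmv_count) {
    stack[index].weight += len; /* duplicate candidate: accumulate weight */
  } else if (index < TOY_MAX_REF_MV_STACK) {
    stack[index].this_mv = this_mv; /* new candidate: append */
    stack[index].weight = len;
    ++(*refmv_count);
  }
}
```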
......@@ -1524,7 +1524,6 @@ static void build_inter_predictors_for_planes(const AV1_COMMON *cm,
int plane;
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
const int unify_bsize = 1;
for (plane = plane_from; plane <= plane_to; ++plane) {
const struct macroblockd_plane *pd = &xd->plane[plane];
const int bw = pd->width;
......@@ -1534,33 +1533,11 @@ static void build_inter_predictors_for_planes(const AV1_COMMON *cm,
pd->subsampling_y))
continue;
if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8 && !unify_bsize) {
const PARTITION_TYPE bp = bsize - xd->mi[0]->mbmi.sb_type;
const int have_vsplit = bp != PARTITION_HORZ;
const int have_hsplit = bp != PARTITION_VERT;
const int num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x);
const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y);
const int pw = 8 >> (have_vsplit | pd->subsampling_x);
const int ph = 8 >> (have_hsplit | pd->subsampling_y);
int x, y;
assert(bp != PARTITION_NONE && bp < PARTITION_TYPES);
assert(bsize == BLOCK_8X8);
assert(pw * num_4x4_w == bw && ph * num_4x4_h == bh);
for (y = 0; y < num_4x4_h; ++y)
for (x = 0; x < num_4x4_w; ++x)
build_inter_predictors(cm, xd, plane,
build_inter_predictors(cm, xd, plane,
#if CONFIG_MOTION_VAR
xd->mi[0], 0,
#endif // CONFIG_MOTION_VAR
y * 2 + x, bw, bh, 4 * x, 4 * y, pw, ph, mi_x,
mi_y);
} else {
build_inter_predictors(cm, xd, plane,
#if CONFIG_MOTION_VAR
xd->mi[0], 0,
xd->mi[0], 0,
#endif // CONFIG_MOTION_VAR
0, bw, bh, 0, 0, bw, bh, mi_x, mi_y);
}
0, bw, bh, 0, 0, bw, bh, mi_x, mi_y);
}
}
......
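With the sub-8x8 tiling branch gone, `build_inter_predictors_for_planes` reduces to one whole-block prediction call per plane. A toy sketch of that shape (the `predict` callback is hypothetical, and per-plane subsampling of `bw`/`bh` is elided for brevity):

```c
/* One full-size prediction call per plane; no 4x4 sub-block tiling anymore. */
static void toy_predict_planes(int plane_from, int plane_to, int bw, int bh,
                               int mi_x, int mi_y,
                               void (*predict)(int plane, int w, int h,
                                               int x, int y)) {
  for (int plane = plane_from; plane <= plane_to; ++plane)
    predict(plane, bw, bh, mi_x, mi_y);
}
```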
......@@ -448,35 +448,11 @@ static INLINE int has_subpel_mv_component(const MODE_INFO *const mi,
const BLOCK_SIZE bsize = mbmi->sb_type;
int plane;
int ref = (dir >> 1);
const int unify_bsize = 1;
if (bsize >= BLOCK_8X8 || unify_bsize) {
if (dir & 0x01) {
if (mbmi->mv[ref].as_mv.col & SUBPEL_MASK) return 1;
} else {
if (mbmi->mv[ref].as_mv.row & SUBPEL_MASK) return 1;
}
if (dir & 0x01) {
if (mbmi->mv[ref].as_mv.col & SUBPEL_MASK) return 1;
} else {
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const PARTITION_TYPE bp = BLOCK_8X8 - bsize;
const struct macroblockd_plane *const pd = &xd->plane[plane];
const int have_vsplit = bp != PARTITION_HORZ;
const int have_hsplit = bp != PARTITION_VERT;
const int num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x);
const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y);
int x, y;
for (y = 0; y < num_4x4_h; ++y) {
for (x = 0; x < num_4x4_w; ++x) {
const MV mv = average_split_mvs(pd, mi, ref, y * 2 + x);
if (dir & 0x01) {
if (mv.col & SUBPEL_MASK) return 1;
} else {
if (mv.row & SUBPEL_MASK) return 1;
}
}
}
}
if (mbmi->mv[ref].as_mv.row & SUBPEL_MASK) return 1;
}
return 0;
......
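After the cleanup, `has_subpel_mv_component` only inspects the whole-block motion vector, and the check is a simple bit test against the fractional MV bits. A small sketch with toy names (taking the mask as 7 for 1/8-pel MV units is an assumption; libaom's `SUBPEL_MASK` may be defined differently):

```c
#include <stdint.h>

/* Assumption: MVs are in 1/8-pel units, so the low three bits are fractional. */
#define TOY_SUBPEL_MASK 7

typedef struct { int16_t row, col; } ToyMv;

/* The low bit of dir selects the column (1) or row (0) component, matching
 * the `dir & 0x01` test kept in the hunk above. */
static int toy_has_subpel_component(ToyMv mv, int dir) {
  if (dir & 0x01) return (mv.col & TOY_SUBPEL_MASK) != 0;
  return (mv.row & TOY_SUBPEL_MASK) != 0;
}
```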
......@@ -1169,7 +1169,6 @@ static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd,
BLOCK_SIZE bsize) {
AV1_COMMON *const cm = &pbi->common;
const int hbs = mi_size_wide[bsize] >> 1;
const int unify_bsize = 1;
#if CONFIG_EXT_PARTITION_TYPES
BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
#endif
......@@ -1183,62 +1182,54 @@ static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd,
partition = get_partition(cm, mi_row, mi_col, bsize);
subsize = subsize_lookup[partition][bsize];
if (!hbs && !unify_bsize) {
xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
} else {
switch (partition) {
case PARTITION_NONE:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize);
break;
case PARTITION_HORZ:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
if (has_rows)
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r,
subsize);
break;
case PARTITION_VERT:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
if (has_cols)
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r,
subsize);
break;
case PARTITION_SPLIT:
detoken_and_recon_sb(pbi, xd, mi_row, mi_col, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row, mi_col + hbs, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize);
break;
switch (partition) {
case PARTITION_NONE:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize);
break;
case PARTITION_HORZ:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
if (has_rows)
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, subsize);
break;
case PARTITION_VERT:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
if (has_cols)
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, subsize);
break;
case PARTITION_SPLIT:
detoken_and_recon_sb(pbi, xd, mi_row, mi_col, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row, mi_col + hbs, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col, r, subsize);
detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize);
break;
#if CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_PARTITION_TYPES_AB
#error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions
#endif
case PARTITION_HORZ_A:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, subsize);
break;
case PARTITION_HORZ_B:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
bsize2);
break;
case PARTITION_VERT_A:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, subsize);
break;
case PARTITION_VERT_B:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
bsize2);
break;
case PARTITION_HORZ_A:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, subsize);
break;
case PARTITION_HORZ_B:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
bsize2);
break;
case PARTITION_VERT_A:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, subsize);
break;
case PARTITION_VERT_B:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, subsize);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row + hbs, mi_col + hbs, r,
bsize2);
break;
#endif
default: assert(0 && "Invalid partition type");
}
default: assert(0 && "Invalid partition type");
}
}
#endif
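With sub-8x8 handling unified, `detoken_and_recon_sb` no longer needs the `!hbs` special case: the switch runs unconditionally, leaf partitions decode their blocks directly, and only `PARTITION_SPLIT` recurses. A toy skeleton of that recursion shape (not the real decoder API; square mi-unit sizes only, and the HORZ/VERT and extended partitions are folded into the same "decode directly" pattern shown above):

```c
typedef enum { TOY_PART_NONE, TOY_PART_SPLIT } ToyPartition;

static void toy_decode_block(int mi_row, int mi_col, int size_mi) {
  (void)mi_row; (void)mi_col; (void)size_mi; /* stand-in for real decode work */
}

/* Leaves decode blocks; SPLIT recurses into the four quadrants. */
static void toy_decode_sb(int mi_row, int mi_col, int size_mi,
                          ToyPartition (*read_part)(int, int, int)) {
  const int hbs = size_mi >> 1; /* half block size in mi units */
  switch (read_part(mi_row, mi_col, size_mi)) {
    case TOY_PART_NONE:
      toy_decode_block(mi_row, mi_col, size_mi);
      break;
    case TOY_PART_SPLIT:
      toy_decode_sb(mi_row, mi_col, hbs, read_part);
      toy_decode_sb(mi_row, mi_col + hbs, hbs, read_part);
      toy_decode_sb(mi_row + hbs, mi_col, hbs, read_part);
      toy_decode_sb(mi_row + hbs, mi_col + hbs, hbs, read_part);
      break;
  }
}
```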
......@@ -1317,7 +1308,6 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_EXT_PARTITION_TYPES_AB
const int qbs = num_8x8_wh >> 2;
#endif
const int unify_bsize = 1;
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
#if CONFIG_EXT_PARTITION_TYPES
......@@ -1363,93 +1353,86 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#define DEC_PARTITION(db_r, db_c, db_subsize) \
decode_partition(pbi, xd, DEC_BLOCK_STX_ARG(db_r), (db_c), r, (db_subsize))
if (!hbs && !unify_bsize) {
// calculate bmode block dimensions (log 2)
xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
DEC_BLOCK(mi_row, mi_col, subsize);
} else {
switch (partition) {
case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
case PARTITION_HORZ:
DEC_BLOCK(mi_row, mi_col, subsize);
if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_VERT:
DEC_BLOCK(mi_row, mi_col, subsize);
if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_SPLIT:
DEC_PARTITION(mi_row, mi_col, subsize);
DEC_PARTITION(mi_row, mi_col + hbs, subsize);
DEC_PARTITION(mi_row + hbs, mi_col, subsize);
DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
break;
switch (partition) {
case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
case PARTITION_HORZ:
DEC_BLOCK(mi_row, mi_col, subsize);
if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_VERT:
DEC_BLOCK(mi_row, mi_col, subsize);
if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_SPLIT:
DEC_PARTITION(mi_row, mi_col, subsize);
DEC_PARTITION(mi_row, mi_col + hbs, subsize);
DEC_PARTITION(mi_row + hbs, mi_col, subsize);
DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
break;
#if CONFIG_EXT_PARTITION_TYPES
#if CONFIG_EXT_PARTITION_TYPES_AB
case PARTITION_HORZ_A:
DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
DEC_BLOCK(mi_row + qbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_HORZ_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row + hbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
if (mi_row + 3 * qbs < cm->mi_rows)
DEC_BLOCK(mi_row + 3 * qbs, mi_col,
get_subsize(bsize, PARTITION_HORZ_4));
break;
case PARTITION_VERT_A:
DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_VERT_4));
DEC_BLOCK(mi_row, mi_col + qbs, get_subsize(bsize, PARTITION_VERT_4));
DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_VERT_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row, mi_col + hbs, get_subsize(bsize, PARTITION_VERT_4));
if (mi_col + 3 * qbs < cm->mi_cols)
DEC_BLOCK(mi_row, mi_col + 3 * qbs,
get_subsize(bsize, PARTITION_VERT_4));
break;
case PARTITION_HORZ_A:
DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
DEC_BLOCK(mi_row + qbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_HORZ_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row + hbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
if (mi_row + 3 * qbs < cm->mi_rows)
DEC_BLOCK(mi_row + 3 * qbs, mi_col,
get_subsize(bsize, PARTITION_HORZ_4));
break;
case PARTITION_VERT_A:
DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_VERT_4));
DEC_BLOCK(mi_row, mi_col + qbs, get_subsize(bsize, PARTITION_VERT_4));
DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_VERT_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row, mi_col + hbs, get_subsize(bsize, PARTITION_VERT_4));
if (mi_col + 3 * qbs < cm->mi_cols)
DEC_BLOCK(mi_row, mi_col + 3 * qbs,
get_subsize(bsize, PARTITION_VERT_4));
break;
#else
case PARTITION_HORZ_A:
DEC_BLOCK(mi_row, mi_col, bsize2);
DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_HORZ_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
break;
case PARTITION_VERT_A:
DEC_BLOCK(mi_row, mi_col, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_VERT_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
break;
case PARTITION_HORZ_A:
DEC_BLOCK(mi_row, mi_col, bsize2);
DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_HORZ_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
break;
case PARTITION_VERT_A:
DEC_BLOCK(mi_row, mi_col, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_VERT_B:
DEC_BLOCK(mi_row, mi_col, subsize);
DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
break;
#endif
case PARTITION_HORZ_4:
for (i = 0; i < 4; ++i) {
int this_mi_row = mi_row + i * quarter_step;
if (i > 0 && this_mi_row >= cm->mi_rows) break;
DEC_BLOCK(this_mi_row, mi_col, subsize);
}
break;
case PARTITION_VERT_4:
for (i = 0; i < 4; ++i) {
int this_mi_col = mi_col + i * quarter_step;
if (i > 0 && this_mi_col >= cm->mi_cols) break;
DEC_BLOCK(mi_row, this_mi_col, subsize);
}
break;
case PARTITION_HORZ_4:
for (i = 0; i < 4; ++i) {
int this_mi_row = mi_row + i * quarter_step;
if (i > 0 && this_mi_row >= cm->mi_rows) break;
DEC_BLOCK(this_mi_row, mi_col, subsize);
}
break;
case PARTITION_VERT_4:
for (i = 0; i < 4; ++i) {
int this_mi_col = mi_col + i * quarter_step;
if (i > 0 && this_mi_col >= cm->mi_cols) break;
DEC_BLOCK(mi_row, this_mi_col, subsize);
}
break;
#endif // CONFIG_EXT_PARTITION_TYPES
default: assert(0 && "Invalid partition type");
}
default: assert(0 && "Invalid partition type");
}
#undef DEC_PARTITION
......
......@@ -197,17 +197,13 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
#else
is_newmv = aom_read(r, ec_ctx->newmv_prob[mode_ctx], ACCT_STR) == 0;
#endif
if (is_newmv) {
if (counts) ++counts->newmv_mode[mode_ctx][0];
return NEWMV;
}
if (counts) ++counts->newmv_mode[mode_ctx][1];
if (ctx & (1 << ALL_ZERO_FLAG_OFFSET)) return ZEROMV;
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
#if CONFIG_NEW_MULTISYMBOL
is_zeromv =
aom_read_symbol(r, ec_ctx->zeromv_cdf[mode_ctx], 2, ACCT_STR) == 0;
......@@ -219,28 +215,22 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
return ZEROMV;
}
if (counts) ++counts->zeromv_mode[mode_ctx][1];
mode_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
if (ctx & (1 << SKIP_NEARESTMV_OFFSET)) mode_ctx = 6;
if (ctx & (1 << SKIP_NEARMV_OFFSET)) mode_ctx = 7;
if (ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) mode_ctx = 8;
#if CONFIG_NEW_MULTISYMBOL
is_refmv = aom_read_symbol(r, ec_ctx->refmv_cdf[mode_ctx], 2, ACCT_STR) == 0;
#else
is_refmv = aom_read(r, ec_ctx->refmv_prob[mode_ctx], ACCT_STR) == 0;
#endif
if (is_refmv) {
if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
} else {
if (counts) ++counts->refmv_mode[mode_ctx][1];
return NEARMV;
}
// Invalid prediction mode.
assert(0);
}
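What remains of `read_inter_mode` is the single-block cascade visible above: a NEWMV flag, then a ZEROMV flag, then a REFMV flag that separates NEARESTMV from NEARMV (with `== 0` meaning "take this mode"). A hedged sketch of that decision tree, with a generic callback standing in for `aom_read`/`aom_read_symbol` and the context shortcuts (the ALL_ZERO and SKIP_NEAR* offsets) elided:

```c
typedef enum { TOY_NEWMV, TOY_ZEROMV, TOY_NEARESTMV, TOY_NEARMV } ToyInterMode;

/* read_bit(ctx) models one coded decision on the per-context
 * newmv/zeromv/refmv probability; 0 selects the earlier mode, as above. */
static ToyInterMode toy_read_inter_mode(int (*read_bit)(int ctx),
                                        int newmv_ctx, int zeromv_ctx,
                                        int refmv_ctx) {
  if (read_bit(newmv_ctx) == 0) return TOY_NEWMV;
  if (read_bit(zeromv_ctx) == 0) return TOY_ZEROMV;
  if (read_bit(refmv_ctx) == 0) return TOY_NEARESTMV;
  return TOY_NEARMV;
}
```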
......@@ -249,7 +239,6 @@ static void read_drl_idx(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, aom_reader *r) {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV
#if CONFIG_COMPOUND_SINGLEREF
|| mbmi->mode == SR_NEW_NEWMV
......@@ -270,7 +259,6 @@ static void read_drl_idx(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
}
}
}
if (have_nearmv_in_inter_mode(mbmi->mode)) {
int idx;
// Offset the NEARESTMV mode.
......@@ -377,9 +365,7 @@ static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_read_symbol(r, xd->tile_ctx->inter_compound_mode_cdf[ctx],
INTER_COMPOUND_MODES, ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_compound_mode[ctx][mode];
assert(is_inter_compound_mode(NEAREST_NEARESTMV + mode));
return NEAREST_NEARESTMV + mode;
}
......@@ -2224,7 +2210,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
const int unify_bsize = 1;