Commit 1f26840f authored by Jingning Han

Enable recursive partition down to 4x4

This commit allows the encoder's rate-distortion optimization recursion
to go down to 4x4 block size. It deprecates the I4X4_PRED and SPLITMV
syntax elements in bit-stream writing/reading. The now-unused probability
models will be removed in the next patch.
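
As an illustration of what the recursion change means on the encoder side, here is a minimal, self-contained C sketch. It is not part of this diff: the enum values and the function name search_partition are hypothetical stand-ins for the real encoder structures; it only shows that PARTITION_SPLIT now recurses to a 4x4 leaf instead of stopping at 8x8 and handing off to SPLITMV/I4X4_PRED.

```c
/* Illustrative sketch only -- not the actual vp9 encoder code. */
#include <stdio.h>

typedef enum {
  AB4X4 = 0,   /* 4x4: the new recursion leaf under CONFIG_AB4X4 */
  SB8X8,
  MB16X16,
  SB32X32,
  SB64X64
} SQUARE_BSIZE;

static void search_partition(SQUARE_BSIZE bsize, int depth) {
  int i;

  /* the rate-distortion cost of PARTITION_NONE would be evaluated here */
  printf("%*sPARTITION_NONE at square size index %d\n", 2 * depth, "",
         (int)bsize);

  if (bsize == AB4X4)
    return;  /* 4x4 is the leaf; the old code stopped at 8x8 and signalled
                SPLITMV or I4X4_PRED instead of splitting further */

  /* PARTITION_SPLIT: recurse into the four square sub-blocks */
  for (i = 0; i < 4; ++i)
    search_partition((SQUARE_BSIZE)(bsize - 1), depth + 1);
}

int main(void) {
  search_partition(SB64X64, 0);  /* 64x64 -> 32x32 -> 16x16 -> 8x8 -> 4x4 */
  return 0;
}
```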

The partition type search and the bit-stream now support the rectangular
partitions of the 8x8 block, i.e., 8x4 and 4x8. The rate-distortion code
still needs to be revised so that these two partition types are tested in
the RD loop.
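
To make the new 8x8 sub-partitioning concrete, a small sketch of the partition-type-to-subsize mapping for an 8x8 block is given below. It mirrors the get_subsize() additions in the hunks that follow, but uses simplified, hypothetical enum names rather than the real BLOCK_SIZE_TYPE values.

```c
/* Sketch with simplified names; B8X4/B4X8 are the new rectangular cases. */
#include <assert.h>

typedef enum { P_NONE, P_HORZ, P_VERT, P_SPLIT } PART_TYPE;
typedef enum { B4X4, B4X8, B8X4, B8X8 } BSIZE;

BSIZE subsize_of_8x8(PART_TYPE p) {
  switch (p) {
    case P_NONE:  return B8X8;  /* keep the whole 8x8 block */
    case P_HORZ:  return B8X4;  /* two stacked 8x4 blocks */
    case P_VERT:  return B4X8;  /* two side-by-side 4x8 blocks */
    case P_SPLIT: return B4X4;  /* four 4x4 blocks, searched recursively */
    default:      assert(0);    return B8X8;
  }
}
```

So far only the square split path is exercised by the RD search; the horizontal and vertical cases above are what still need to be wired into the rate-distortion loop.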

Change-Id: I0dfe3b90a1507ad6138db10cc58e6e237a06a9d6
parent dee12bdf
......@@ -222,12 +222,21 @@ static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) {
int a = b_width_log2(sb_type) - 1;
#if CONFIG_AB4X4
// align 4x4 block to mode_info
if (a < 0)
a = 0;
#endif
assert(a >= 0);
return a;
}
static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) {
int a = b_height_log2(sb_type) - 1;
#if CONFIG_AB4X4
if (a < 0)
a = 0;
#endif
assert(a >= 0);
return a;
}
......@@ -442,9 +451,12 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
int bhl = mi_height_log2(sb_type);
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
int i;
// skip macroblock partition
#if !CONFIG_AB4X4
// skip 8x8 block partition
if (bsl == 0)
return;
#endif
// update the partition context at the end notes. set partition bits
// of block sizes larger than the current one to be one, and partition
......@@ -492,7 +504,11 @@ static INLINE int partition_plane_context(MACROBLOCKD *xd,
above = (above > 0);
left = (left > 0);
#if CONFIG_AB4X4
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
#else
return (left * 2 + above) + (bsl - 1) * PARTITION_PLOFFSET;
#endif
}
static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
......@@ -509,6 +525,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB32X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB16X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB8X4;
#endif
else
assert(0);
break;
......@@ -519,6 +539,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB16X32;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X16;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB4X8;
#endif
else
assert(0);
break;
......@@ -529,6 +553,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_MB16X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_AB4X4;
#endif
else
assert(0);
break;
......
......@@ -106,6 +106,12 @@ const vp9_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP9_SUBMVREFS - 1] = {
const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// FIXME(jingning,rbultje) put real probabilities here
#if CONFIG_AB4X4
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
{104, 90, 134},
#endif
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
......@@ -513,6 +519,7 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
vp9_sub_mv_ref_tree, fc->sub_mv_ref_counts[i],
fc->pre_sub_mv_ref_prob[i], fc->sub_mv_ref_prob[i],
LEFT4X4);
for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
fc->partition_counts[i], fc->pre_partition_prob[i],
......
......@@ -48,6 +48,10 @@ typedef enum PARTITION_TYPE {
} PARTITION_TYPE;
#define PARTITION_PLOFFSET 4 // number of probability models per block size
#if CONFIG_AB4X4
#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
#else
#define NUM_PARTITION_CONTEXTS (3 * PARTITION_PLOFFSET)
#endif
#endif // VP9_COMMON_VP9_ENUMS_H_
......@@ -119,13 +119,25 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
// luma mode
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
m->mbmi.mode = read_kf_sb_ymode(r,
cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]);
else
m->mbmi.mode = I4X4_PRED;
#else
m->mbmi.mode = m->mbmi.sb_type > BLOCK_SIZE_SB8X8 ?
read_kf_sb_ymode(r, cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]):
read_kf_mb_ymode(r, cm->kf_ymode_prob[cm->kf_ymode_probs_index]);
#endif
m->mbmi.ref_frame = INTRA_FRAME;
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (m->mbmi.mode == I4X4_PRED) {
#endif
int i;
for (i = 0; i < 4; ++i) {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
......@@ -139,7 +151,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
if (cm->txfm_mode == TX_MODE_SELECT &&
!m->mbmi.mb_skip_coeff && m->mbmi.mode != I4X4_PRED) {
!m->mbmi.mb_skip_coeff &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
......@@ -150,7 +168,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 &&
m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != I4X4_PRED) {
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
m->mbmi.txfm_size = TX_8X8;
} else {
m->mbmi.txfm_size = TX_4X4;
......@@ -618,9 +642,16 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
} else {
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8)
mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
else
mbmi->mode = SPLITMV;
#else
mbmi->mode = mbmi->sb_type > BLOCK_SIZE_SB8X8 ?
read_sb_mv_ref(r, mv_ref_p)
: read_mv_ref(r, mv_ref_p);
#endif
vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]);
}
......@@ -820,6 +851,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// required for left and above block mv
mv0->as_int = 0;
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
} else {
mbmi->mode = I4X4_PRED;
}
#else
if (mbmi->sb_type > BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
......@@ -827,9 +866,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->mode = read_ymode(r, cm->fc.ymode_prob);
cm->fc.ymode_counts[mbmi->mode]++;
}
#endif
// If MB mode is I4X4_PRED read the block modes
#if CONFIG_AB4X4
if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mbmi->mode == I4X4_PRED) {
#endif
int j = 0;
do {
int m = read_bmode(r, cm->fc.bmode_prob);
......@@ -842,9 +886,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
}
#if CONFIG_AB4X4
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
#else
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != I4X4_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
#endif
const int allow_16x16 = mbmi->sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = mbmi->sb_type >= BLOCK_SIZE_SB32X32;
mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
......@@ -852,13 +901,21 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
} else if (cm->txfm_mode >= ALLOW_16X16 &&
mbmi->sb_type >= BLOCK_SIZE_MB16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->sb_type >= BLOCK_SIZE_MB16X16
#if !CONFIG_AB4X4
&& ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))
#endif
) {
mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
(mbmi->sb_type >= BLOCK_SIZE_SB8X8))
#else
(!(mbmi->ref_frame == INTRA_FRAME && mbmi->mode == I4X4_PRED) &&
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV))) {
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV)))
#endif
{
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;
......
......@@ -417,10 +417,14 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_decode_mb_mode_mv(pbi, xd, mi_row, mi_col, r);
set_refs(pbi, mi_row, mi_col);
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
#else
if (bsize == BLOCK_SIZE_SB8X8 &&
(xd->mode_info_context->mbmi.mode == SPLITMV ||
xd->mode_info_context->mbmi.mode == I4X4_PRED))
decode_atom(pbi, xd, mi_row, mi_col, r, bsize);
#endif
decode_atom(pbi, xd, mi_row, mi_col, r, BLOCK_SIZE_SB8X8);
else
decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
......@@ -439,7 +443,17 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
return;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
// read the partition information
xd->left_seg_context = pc->left_seg_context + (mi_row & MI_MASK);
......@@ -451,6 +465,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
}
subsize = get_subsize(bsize, partition);
switch (partition) {
case PARTITION_NONE:
decode_modes_b(pbi, mi_row, mi_col, r, subsize);
......@@ -476,8 +491,13 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
assert(0);
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(pc, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
......
......@@ -629,12 +629,21 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
active_section = 6;
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
else
write_ymode(bc, mode, pc->fc.ymode_prob);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mode == I4X4_PRED) {
#endif
int j = 0;
do {
write_bmode(bc, m->bmi[j].as_mode.first,
......@@ -654,11 +663,16 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
#if CONFIG_AB4X4
if (mi->sb_type >= BLOCK_SIZE_SB8X8)
write_sb_mv_ref(bc, mode, mv_ref_p);
#else
if (mi->sb_type > BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
} else {
write_mv_ref(bc, mode, mv_ref_p);
}
#endif
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
}
......@@ -744,11 +758,20 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
}
#if CONFIG_AB4X4
if (((rf == INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8) ||
(rf != INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP)))
#else
if (((rf == INTRA_FRAME && mode != I4X4_PRED) ||
(rf != INTRA_FRAME && mode != SPLITMV)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP))) {
SEG_LVL_SKIP)))
#endif
{
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
......@@ -780,12 +803,21 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
else
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (ym == I4X4_PRED) {
#endif
int i = 0;
do {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
......@@ -803,8 +835,13 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#else
if (ym != I4X4_PRED && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#endif
TX_SIZE sz = m->mbmi.txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
......@@ -876,7 +913,19 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
else
assert(0);
#if CONFIG_AB4X4
if (bsize == BLOCK_SIZE_SB8X8 && m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
partition = PARTITION_SPLIT;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
xd->above_seg_context = cm->above_seg_context + mi_col;
......@@ -915,8 +964,13 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
......
......@@ -140,7 +140,12 @@ struct macroblock {
// TODO(jingning): Need to refactor the structure arrays that buffers the
// coding mode decisions of each partition type.
PICK_MODE_CONTEXT sb8_context[4][4][4];
#if CONFIG_AB4X4
PICK_MODE_CONTEXT ab4x4_context[4][4][4];
PICK_MODE_CONTEXT sb8x4_context[4][4][4];
PICK_MODE_CONTEXT sb4x8_context[4][4][4];
#endif
PICK_MODE_CONTEXT sb8x8_context[4][4][4];
PICK_MODE_CONTEXT sb8x16_context[4][4][2];
PICK_MODE_CONTEXT sb16x8_context[4][4][2];
PICK_MODE_CONTEXT mb_context[4][4];
......@@ -153,6 +158,9 @@ struct macroblock {
PICK_MODE_CONTEXT sb64_context;
int partition_cost[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
#if CONFIG_AB4X4
BLOCK_SIZE_TYPE b_partitioning[4][4][4];
#endif
BLOCK_SIZE_TYPE mb_partitioning[4][4];
BLOCK_SIZE_TYPE sb_partitioning[4];
BLOCK_SIZE_TYPE sb64_partitioning;
......
......@@ -689,7 +689,11 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
int *Distortion, int64_t best_rd) {
int i;
MACROBLOCKD *const xd = &mb->e_mbd;
#if CONFIG_AB4X4
int cost = 0;
#else
int cost = mb->mbmode_cost[xd->frame_type][I4X4_PRED];
#endif
int distortion = 0;
int tot_rate_y = 0;
int64_t total_rd = 0;
......@@ -719,7 +723,6 @@ static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
total_rd += rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
t_above + x_idx, t_left + y_idx,
&r, &ry, &d);
cost += r;
distortion += d;
tot_rate_y += ry;
......@@ -753,6 +756,13 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
TX_SIZE UNINITIALIZED_IS_SAFE(best_tx);
int i;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
return best_rd;
}
#endif
for (i = 0; i < NB_TXFM_MODES; i++)
txfm_cache[i] = INT64_MAX;
......@@ -2308,7 +2318,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv, &uv_skip,
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
bsize);
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
#else
if (bsize == BLOCK_SIZE_SB8X8)
#endif
err4x4 = rd_pick_intra4x4mby_modes(cpi, x, &rate4x4_y,
&rate4x4_y_tokenonly,
&dist4x4_y, err);
......@@ -2321,7 +2335,11 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
sizeof(x->sb32_context[xd->sb_index].txfm_rd_diff));
xd->mode_info_context->mbmi.mode = mode;
xd->mode_info_context->mbmi.txfm_size = txfm_size;
#if CONFIG_AB4X4
} else if (bsize < BLOCK_SIZE_SB8X8 && err4x4 < err) {
#else
} else if (bsize == BLOCK_SIZE_SB8X8 && err4x4 < err) {
#endif
*returnrate = rate4x4_y + rate_uv +
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
*returndist = dist4x4_y + (dist_uv >> 2);
......@@ -2463,7 +2481,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
i++) {
mbmi->txfm_size = i;
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_intra[i], &rate_uv_tokenonly[i],
&dist_uv[i], &skip_uv[i], bsize);
&dist_uv[i], &skip_uv[i],
(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
bsize);
mode_uv[i] = mbmi->uv_mode;
}
}
......@@ -2493,6 +2513,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
|| (cpi->ref_frame_flags & flag_list[ref_frame]))) {
continue;
}
if (cpi->speed > 0) {
if (!(ref_frame_mask & (1 << ref_frame))) {
continue;
......@@ -2539,10 +2560,18 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
mbmi->interp_filter = cm->mcomp_filter_type;
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
if (bsize < BLOCK_SIZE_SB8X8 &&
!(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
#else
if (bsize != BLOCK_SIZE_SB8X8 &&
(this_mode == I4X4_PRED || this_mode == SPLITMV))
continue;
#endif
if (comp_pred) {
if (ref_frame == ALTREF_FRAME) {
......@@ -2605,7 +2634,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// Note the rate value returned here includes the cost of coding
// the I4X4_PRED mode : x->mbmode_cost[xd->frame_type][I4X4_PRED];
assert(bsize == BLOCK_SIZE_SB8X8);
mbmi->txfm_size = TX_4X4;
rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y,
&distortion_y, INT64_MAX);
......@@ -3001,7 +3029,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
}
#if CONFIG_AB4X4
if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
*returnrate = INT_MAX;
*returndistortion = INT_MAX;
return best_rd;
}
#endif
assert((cm->mcomp_filter_type == SWITCHABLE) ||
(cm->mcomp_filter_type == best_mbmode.interp_filter) ||
......
......@@ -119,7 +119,12 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
TOKENEXTRA *t = *tp; /* store tokens starting here */
const int eob = xd->plane[plane].eobs[block];
const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff, block, 16);
#if CONFIG_AB4X4
const BLOCK_SIZE_TYPE sb_type = (mbmi->sb_type < BLOCK_SIZE_SB8X8) ?
BLOCK_SIZE_SB8X8 : mbmi->sb_type;
#else
const BLOCK_SIZE_TYPE sb_type = mbmi->sb_type;
#endif
const int bwl = b_width_log2(sb_type);
const int off = block >> (2 * tx_size);
const int mod = bwl - tx_size - xd->plane[plane].subsampling_x;
......