Commit 1f26840f authored by Jingning Han

Enable recursive partition down to 4x4

This commit allows the rate-distortion optimization recursion
at encoder to go down to 4x4 block size. It deprecates the use
of I4X4_PRED and SPLITMV syntax elements from bit-stream
writing/reading. Will remove the unused probability models in
the next patch.

The partition type search and bit-stream writing/reading are now
capable of supporting the rectangular partitions of the 8x8 block,
i.e., 8x4 and 4x8. Need to revise the rate-distortion parts to get
these two partitions tested in the rd loop.

Change-Id: I0dfe3b90a1507ad6138db10cc58e6e237a06a9d6
parent dee12bdf
......@@ -222,12 +222,21 @@ static INLINE int b_height_log2(BLOCK_SIZE_TYPE sb_type) {
// Log2 of the block width measured in mode_info (8x8) units.
static INLINE int mi_width_log2(BLOCK_SIZE_TYPE sb_type) {
  int lg = b_width_log2(sb_type) - 1;
#if CONFIG_AB4X4
  // align 4x4 block to mode_info: clamp sub-8x8 sizes to one mi unit
  lg = (lg < 0) ? 0 : lg;
#endif
  assert(lg >= 0);
  return lg;
}
// Log2 of the block height measured in mode_info (8x8) units.
static INLINE int mi_height_log2(BLOCK_SIZE_TYPE sb_type) {
  int lg = b_height_log2(sb_type) - 1;
#if CONFIG_AB4X4
  // align 4x4 block to mode_info: clamp sub-8x8 sizes to one mi unit
  lg = (lg < 0) ? 0 : lg;
#endif
  assert(lg >= 0);
  return lg;
}
......@@ -442,9 +451,12 @@ static INLINE void update_partition_context(MACROBLOCKD *xd,
int bhl = mi_height_log2(sb_type);
int boffset = mi_width_log2(BLOCK_SIZE_SB64X64) - bsl;
int i;
// skip macroblock partition
#if !CONFIG_AB4X4
// skip 8x8 block partition
if (bsl == 0)
return;
#endif
// update the partition context at the end notes. set partition bits
// of block sizes larger than the current one to be one, and partition
......@@ -492,7 +504,11 @@ static INLINE int partition_plane_context(MACROBLOCKD *xd,
above = (above > 0);
left = (left > 0);
#if CONFIG_AB4X4
return (left * 2 + above) + bsl * PARTITION_PLOFFSET;
#else
return (left * 2 + above) + (bsl - 1) * PARTITION_PLOFFSET;
#endif
}
static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
......@@ -509,6 +525,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB32X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB16X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB8X4;
#endif
else
assert(0);
break;
......@@ -519,6 +539,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_SB16X32;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X16;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_SB4X8;
#endif
else
assert(0);
break;
......@@ -529,6 +553,10 @@ static BLOCK_SIZE_TYPE get_subsize(BLOCK_SIZE_TYPE bsize,
subsize = BLOCK_SIZE_MB16X16;
else if (bsize == BLOCK_SIZE_MB16X16)
subsize = BLOCK_SIZE_SB8X8;
#if CONFIG_AB4X4
else if (bsize == BLOCK_SIZE_SB8X8)
subsize = BLOCK_SIZE_AB4X4;
#endif
else
assert(0);
break;
......
......@@ -106,6 +106,12 @@ const vp9_prob vp9_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP9_SUBMVREFS - 1] = {
const vp9_prob vp9_partition_probs[NUM_PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// FIXME(jingning,rbultje) put real probabilities here
#if CONFIG_AB4X4
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
{104, 90, 134},
#endif
{202, 162, 107},
{16, 2, 169},
{3, 246, 19},
......@@ -513,6 +519,7 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
vp9_sub_mv_ref_tree, fc->sub_mv_ref_counts[i],
fc->pre_sub_mv_ref_prob[i], fc->sub_mv_ref_prob[i],
LEFT4X4);
for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
update_mode_probs(PARTITION_TYPES, vp9_partition_tree,
fc->partition_counts[i], fc->pre_partition_prob[i],
......
......@@ -48,6 +48,10 @@ typedef enum PARTITION_TYPE {
} PARTITION_TYPE;
#define PARTITION_PLOFFSET 4 // number of probability models per block size
#if CONFIG_AB4X4
#define NUM_PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET)
#else
#define NUM_PARTITION_CONTEXTS (3 * PARTITION_PLOFFSET)
#endif
#endif // VP9_COMMON_VP9_ENUMS_H_
......@@ -119,13 +119,25 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.mb_skip_coeff = vp9_read(r, vp9_get_pred_prob(cm, xd, PRED_MBSKIP));
// luma mode
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
m->mbmi.mode = read_kf_sb_ymode(r,
cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]);
else
m->mbmi.mode = I4X4_PRED;
#else
m->mbmi.mode = m->mbmi.sb_type > BLOCK_SIZE_SB8X8 ?
read_kf_sb_ymode(r, cm->sb_kf_ymode_prob[cm->kf_ymode_probs_index]):
read_kf_mb_ymode(r, cm->kf_ymode_prob[cm->kf_ymode_probs_index]);
#endif
m->mbmi.ref_frame = INTRA_FRAME;
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (m->mbmi.mode == I4X4_PRED) {
#endif
int i;
for (i = 0; i < 4; ++i) {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
......@@ -139,7 +151,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.uv_mode = read_uv_mode(r, cm->kf_uv_mode_prob[m->mbmi.mode]);
if (cm->txfm_mode == TX_MODE_SELECT &&
!m->mbmi.mb_skip_coeff && m->mbmi.mode != I4X4_PRED) {
!m->mbmi.mb_skip_coeff &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
const int allow_16x16 = m->mbmi.sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = m->mbmi.sb_type >= BLOCK_SIZE_SB32X32;
m->mbmi.txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
......@@ -150,7 +168,13 @@ static void kfread_modes(VP9D_COMP *pbi, MODE_INFO *m,
m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 &&
m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != I4X4_PRED) {
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
m->mbmi.sb_type >= BLOCK_SIZE_SB8X8
#else
m->mbmi.mode != I4X4_PRED
#endif
) {
m->mbmi.txfm_size = TX_8X8;
} else {
m->mbmi.txfm_size = TX_4X4;
......@@ -618,9 +642,16 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (vp9_segfeature_active(xd, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
} else {
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8)
mbmi->mode = read_sb_mv_ref(r, mv_ref_p);
else
mbmi->mode = SPLITMV;
#else
mbmi->mode = mbmi->sb_type > BLOCK_SIZE_SB8X8 ?
read_sb_mv_ref(r, mv_ref_p)
: read_mv_ref(r, mv_ref_p);
#endif
vp9_accum_mv_refs(cm, mbmi->mode, mbmi->mb_mode_context[ref_frame]);
}
......@@ -820,6 +851,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// required for left and above block mv
mv0->as_int = 0;
#if CONFIG_AB4X4
if (mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
} else {
mbmi->mode = I4X4_PRED;
}
#else
if (mbmi->sb_type > BLOCK_SIZE_SB8X8) {
mbmi->mode = read_sb_ymode(r, cm->fc.sb_ymode_prob);
cm->fc.sb_ymode_counts[mbmi->mode]++;
......@@ -827,9 +866,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mbmi->mode = read_ymode(r, cm->fc.ymode_prob);
cm->fc.ymode_counts[mbmi->mode]++;
}
#endif
// If MB mode is I4X4_PRED read the block modes
#if CONFIG_AB4X4
if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mbmi->mode == I4X4_PRED) {
#endif
int j = 0;
do {
int m = read_bmode(r, cm->fc.bmode_prob);
......@@ -842,9 +886,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->fc.uv_mode_counts[mbmi->mode][mbmi->uv_mode]++;
}
#if CONFIG_AB4X4
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
mbmi->sb_type >= BLOCK_SIZE_SB8X8) {
#else
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != I4X4_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
#endif
const int allow_16x16 = mbmi->sb_type >= BLOCK_SIZE_MB16X16;
const int allow_32x32 = mbmi->sb_type >= BLOCK_SIZE_SB32X32;
mbmi->txfm_size = select_txfm_size(cm, r, allow_16x16, allow_32x32);
......@@ -852,13 +901,21 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
} else if (cm->txfm_mode >= ALLOW_16X16 &&
mbmi->sb_type >= BLOCK_SIZE_MB16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->sb_type >= BLOCK_SIZE_MB16X16
#if !CONFIG_AB4X4
&& ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))
#endif
) {
mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 &&
#if CONFIG_AB4X4
(mbmi->sb_type >= BLOCK_SIZE_SB8X8))
#else
(!(mbmi->ref_frame == INTRA_FRAME && mbmi->mode == I4X4_PRED) &&
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV))) {
!(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV)))
#endif
{
mbmi->txfm_size = TX_8X8;
} else {
mbmi->txfm_size = TX_4X4;
......
......@@ -417,10 +417,14 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
vp9_decode_mb_mode_mv(pbi, xd, mi_row, mi_col, r);
set_refs(pbi, mi_row, mi_col);
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
#else
if (bsize == BLOCK_SIZE_SB8X8 &&
(xd->mode_info_context->mbmi.mode == SPLITMV ||
xd->mode_info_context->mbmi.mode == I4X4_PRED))
decode_atom(pbi, xd, mi_row, mi_col, r, bsize);
#endif
decode_atom(pbi, xd, mi_row, mi_col, r, BLOCK_SIZE_SB8X8);
else
decode_sb(pbi, xd, mi_row, mi_col, r, bsize);
......@@ -439,7 +443,17 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
return;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
// read the partition information
xd->left_seg_context = pc->left_seg_context + (mi_row & MI_MASK);
......@@ -451,6 +465,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
}
subsize = get_subsize(bsize, partition);
switch (partition) {
case PARTITION_NONE:
decode_modes_b(pbi, mi_row, mi_col, r, subsize);
......@@ -476,8 +491,13 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
assert(0);
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(pc, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
......
......@@ -629,12 +629,21 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
active_section = 6;
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
write_sb_ymode(bc, mode, pc->fc.sb_ymode_prob);
else
write_ymode(bc, mode, pc->fc.ymode_prob);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (mode == I4X4_PRED) {
#endif
int j = 0;
do {
write_bmode(bc, m->bmi[j].as_mode.first,
......@@ -654,11 +663,16 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
// If segment skip is not enabled code the mode.
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
#if CONFIG_AB4X4
if (mi->sb_type >= BLOCK_SIZE_SB8X8)
write_sb_mv_ref(bc, mode, mv_ref_p);
#else
if (mi->sb_type > BLOCK_SIZE_SB8X8) {
write_sb_mv_ref(bc, mode, mv_ref_p);
} else {
write_mv_ref(bc, mode, mv_ref_p);
}
#endif
vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
}
......@@ -744,11 +758,20 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
}
}
#if CONFIG_AB4X4
if (((rf == INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8) ||
(rf != INTRA_FRAME && mi->sb_type >= BLOCK_SIZE_SB8X8)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP)))
#else
if (((rf == INTRA_FRAME && mode != I4X4_PRED) ||
(rf != INTRA_FRAME && mode != SPLITMV)) &&
pc->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id,
SEG_LVL_SKIP))) {
SEG_LVL_SKIP)))
#endif
{
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
......@@ -780,12 +803,21 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
}
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
#else
if (m->mbmi.sb_type > BLOCK_SIZE_SB8X8)
sb_kfwrite_ymode(bc, ym, c->sb_kf_ymode_prob[c->kf_ymode_probs_index]);
else
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#endif
#if CONFIG_AB4X4
if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
#else
if (ym == I4X4_PRED) {
#endif
int i = 0;
do {
const B_PREDICTION_MODE a = above_block_mode(m, i, mis);
......@@ -803,8 +835,13 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#if CONFIG_AB4X4
if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#else
if (ym != I4X4_PRED && c->txfm_mode == TX_MODE_SELECT &&
!(skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
#endif
TX_SIZE sz = m->mbmi.txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
......@@ -876,7 +913,19 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
else
assert(0);
#if CONFIG_AB4X4
if (bsize == BLOCK_SIZE_SB8X8 && m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
partition = PARTITION_SPLIT;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8) {
#else
if (bsize > BLOCK_SIZE_SB8X8) {
#endif
int pl;
xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
xd->above_seg_context = cm->above_seg_context + mi_col;
......@@ -915,8 +964,13 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
......
......@@ -140,7 +140,12 @@ struct macroblock {
// TODO(jingning): Need to refactor the structure arrays that buffers the
// coding mode decisions of each partition type.
PICK_MODE_CONTEXT sb8_context[4][4][4];
#if CONFIG_AB4X4
PICK_MODE_CONTEXT ab4x4_context[4][4][4];
PICK_MODE_CONTEXT sb8x4_context[4][4][4];
PICK_MODE_CONTEXT sb4x8_context[4][4][4];
#endif
PICK_MODE_CONTEXT sb8x8_context[4][4][4];
PICK_MODE_CONTEXT sb8x16_context[4][4][2];
PICK_MODE_CONTEXT sb16x8_context[4][4][2];
PICK_MODE_CONTEXT mb_context[4][4];
......@@ -153,6 +158,9 @@ struct macroblock {
PICK_MODE_CONTEXT sb64_context;
int partition_cost[NUM_PARTITION_CONTEXTS][PARTITION_TYPES];
#if CONFIG_AB4X4
BLOCK_SIZE_TYPE b_partitioning[4][4][4];
#endif
BLOCK_SIZE_TYPE mb_partitioning[4][4];
BLOCK_SIZE_TYPE sb_partitioning[4];
BLOCK_SIZE_TYPE sb64_partitioning;
......
......@@ -361,8 +361,8 @@ static void update_state(VP9_COMP *cpi,
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
#endif
assert(mi->mbmi.sb_type == bsize);
assert(mi->mbmi.sb_type == bsize);
// Restore the coding context of the MB to that that was in place
// when the mode was picked for it
for (y = 0; y < bh; y++) {
......@@ -640,6 +640,12 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0)
return;
#endif
set_offsets(cpi, mi_row, mi_col, bsize);
xd->mode_info_context->mbmi.sb_type = bsize;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
......@@ -718,7 +724,14 @@ static void set_block_index(MACROBLOCKD *xd, int idx,
} else if (bsize >= BLOCK_SIZE_MB16X16) {
xd->mb_index = idx;
} else {
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8)
xd->b_index = idx;
else
xd->ab_index = idx;
#else
xd->b_index = idx;
#endif
}
}
......@@ -749,7 +762,15 @@ static PICK_MODE_CONTEXT *get_block_context(MACROBLOCK *x,
case BLOCK_SIZE_SB8X16:
return &x->sb8x16_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB8X8:
return &x->sb8_context[xd->sb_index][xd->mb_index][xd->b_index];
return &x->sb8x8_context[xd->sb_index][xd->mb_index][xd->b_index];
#if CONFIG_AB4X4
case BLOCK_SIZE_SB8X4:
return &x->sb8x4_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_SB4X8:
return &x->sb4x8_context[xd->sb_index][xd->mb_index][xd->b_index];
case BLOCK_SIZE_AB4X4:
return &x->ab4x4_context[xd->sb_index][xd->mb_index][xd->b_index];
#endif
default:
assert(0);
return NULL;
......@@ -766,6 +787,10 @@ static BLOCK_SIZE_TYPE *get_sb_partitioning(MACROBLOCK *x,
return &x->sb_partitioning[xd->sb_index];
case BLOCK_SIZE_MB16X16:
return &x->mb_partitioning[xd->sb_index][xd->mb_index];
#if CONFIG_AB4X4
case BLOCK_SIZE_SB8X8:
return &x->b_partitioning[xd->sb_index][xd->mb_index][xd->b_index];
#endif
default:
assert(0);
return NULL;
......@@ -833,14 +858,20 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
const int bsl = mi_width_log2(bsize), bs = (1 << bsl) / 2;
int bwl, bhl;
int UNINITIALIZED_IS_SAFE(pl);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize > BLOCK_SIZE_SB8X8) {
#if CONFIG_AB4X4
c1 = BLOCK_SIZE_AB4X4;
if (bsize >= BLOCK_SIZE_SB8X8)
#else
if (bsize > BLOCK_SIZE_SB8X8)
#endif
{
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
c1 = *(get_sb_partitioning(x, bsize));
......@@ -849,8 +880,18 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
bwl = mi_width_log2(c1), bhl = mi_height_log2(c1);
if (bsl == bwl && bsl == bhl) {
#if CONFIG_AB4X4
if (output_enabled && bsize >= BLOCK_SIZE_SB8X8) {
if (bsize > BLOCK_SIZE_SB8X8 ||
(bsize == BLOCK_SIZE_SB8X8 && c1 == bsize))
cpi->partition_count[pl][PARTITION_NONE]++;
else
cpi->partition_count[pl][PARTITION_SPLIT]++;
}
#else
if (output_enabled && bsize > BLOCK_SIZE_SB8X8)
cpi->partition_count[pl][PARTITION_NONE]++;
#endif
encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
} else if (bsl == bhl && bsl > bwl) {
if (output_enabled)
......@@ -867,14 +908,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
int i;
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
}
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cpi->partition_count[pl][PARTITION_SPLIT]++;
......@@ -888,8 +922,13 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp,
}
}
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || bsl == bwl || bsl == bhl)) {
#else
if (bsize > BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_MB16X16 || bsl == bwl || bsl == bhl)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, c1, bsize);
}
......@@ -907,7 +946,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int bsl = b_width_log2(bsize), bs = 1 << bsl;
int msl = mi_height_log2(bsize), ms = 1 << msl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
......@@ -915,6 +954,15 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
BLOCK_SIZE_TYPE subsize;
int srate = INT_MAX, sdist = INT_MAX;
#if CONFIG_AB4X4
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;