Commit 97096f5f authored by Ronald S. Bultje's avatar Ronald S. Bultje Committed by Gerrit Code Review
Browse files

Merge changes I02e7f64a,Ide954b00,Idc8b5977 into experimental

* changes:
  Fix another typo in 4x4-transform-for-i8x8-intra-pred coeff contexts.
  8x8 transform support in splitmv.
  Use SPLITMV_PARTITIONING instead of a plain integer type.
parents 7906ed09 f72fdf1c
...@@ -175,6 +175,14 @@ typedef enum { ...@@ -175,6 +175,14 @@ typedef enum {
#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */ #define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4) #define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
typedef enum {
PARTITIONING_16X8 = 0,
PARTITIONING_8X16,
PARTITIONING_8X8,
PARTITIONING_4X4,
NB_PARTITIONINGS,
} SPLITMV_PARTITIONING_TYPE;
/* For keyframes, intra block modes are predicted by the (already decoded) /* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */ is a single probability table. */
...@@ -216,7 +224,7 @@ typedef struct { ...@@ -216,7 +224,7 @@ typedef struct {
int mv_ref_index[MAX_REF_FRAMES]; int mv_ref_index[MAX_REF_FRAMES];
#endif #endif
unsigned char partitioning; SPLITMV_PARTITIONING_TYPE partitioning;
unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */ unsigned char mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
unsigned char need_to_clamp_mvs; unsigned char need_to_clamp_mvs;
unsigned char need_to_clamp_secondmv; unsigned char need_to_clamp_secondmv;
......
...@@ -215,9 +215,9 @@ const vp8_tree_index vp8_uv_mode_tree[VP8_UV_MODES * 2 - 2] = { ...@@ -215,9 +215,9 @@ const vp8_tree_index vp8_uv_mode_tree[VP8_UV_MODES * 2 - 2] = {
}; };
const vp8_tree_index vp8_mbsplit_tree[6] = { const vp8_tree_index vp8_mbsplit_tree[6] = {
-3, 2, -PARTITIONING_4X4, 2,
-2, 4, -PARTITIONING_8X8, 4,
-0, -1 -PARTITIONING_16X8, -PARTITIONING_8X16,
}; };
const vp8_tree_index vp8_mv_ref_tree[8] = { const vp8_tree_index vp8_mv_ref_tree[8] = {
......
...@@ -783,7 +783,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t ...@@ -783,7 +783,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
if (mi->mbmi.mode == SPLITMV) { if (mi->mbmi.mode == SPLITMV) {
switch (mi->mbmi.partitioning) { switch (mi->mbmi.partitioning) {
case 0 : { /* mv_top_bottom */ case PARTITIONING_16X8 : { /* mv_top_bottom */
union b_mode_info *bmi = &mi->bmi[0]; union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv; MV *mv = &bmi->mv.as_mv;
...@@ -803,7 +803,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t ...@@ -803,7 +803,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
break; break;
} }
case 1 : { /* mv_left_right */ case PARTITIONING_8X16 : { /* mv_left_right */
union b_mode_info *bmi = &mi->bmi[0]; union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv; MV *mv = &bmi->mv.as_mv;
...@@ -823,7 +823,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t ...@@ -823,7 +823,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
break; break;
} }
case 2 : { /* mv_quarters */ case PARTITIONING_8X8 : { /* mv_quarters */
union b_mode_info *bmi = &mi->bmi[0]; union b_mode_info *bmi = &mi->bmi[0];
MV *mv = &bmi->mv.as_mv; MV *mv = &bmi->mv.as_mv;
...@@ -858,6 +858,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t ...@@ -858,6 +858,7 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t
vp8_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride); vp8_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride);
break; break;
} }
case PARTITIONING_4X4:
default : { default : {
union b_mode_info *bmi = mi->bmi; union b_mode_info *bmi = mi->bmi;
int bx0, by0; int bx0, by0;
......
...@@ -965,7 +965,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) { ...@@ -965,7 +965,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi; MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
BLOCKD *blockd = xd->block; BLOCKD *blockd = xd->block;
if (xd->mode_info_context->mbmi.partitioning < 3) { if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
blockd[ 0].bmi = xd->mode_info_context->bmi[ 0]; blockd[ 0].bmi = xd->mode_info_context->bmi[ 0];
blockd[ 2].bmi = xd->mode_info_context->bmi[ 2]; blockd[ 2].bmi = xd->mode_info_context->bmi[ 2];
blockd[ 8].bmi = xd->mode_info_context->bmi[ 8]; blockd[ 8].bmi = xd->mode_info_context->bmi[ 8];
......
...@@ -1285,10 +1285,12 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -1285,10 +1285,12 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#if CONFIG_TX_SELECT #if CONFIG_TX_SELECT
if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 && if (cm->txfm_mode == TX_MODE_SELECT && mbmi->mb_skip_coeff == 0 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= I8X8_PRED) || ((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= I8X8_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) { (mbmi->ref_frame != INTRA_FRAME && !(mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4)))) {
// FIXME(rbultje) code ternary symbol once all experiments are merged // FIXME(rbultje) code ternary symbol once all experiments are merged
mbmi->txfm_size = vp8_read(bc, cm->prob_tx[0]); mbmi->txfm_size = vp8_read(bc, cm->prob_tx[0]);
if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED) if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV)
mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]); mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]);
} else } else
#endif #endif
...@@ -1297,8 +1299,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi, ...@@ -1297,8 +1299,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) { (mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->txfm_size = TX_16X16; mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && } else if (cm->txfm_mode >= ALLOW_8X8 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != B_PRED) || (!(mbmi->ref_frame == INTRA_FRAME && mbmi->mode == B_PRED) &&
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) { !(mbmi->ref_frame != INTRA_FRAME && mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4))) {
mbmi->txfm_size = TX_8X8; mbmi->txfm_size = TX_8X8;
} else { } else {
mbmi->txfm_size = TX_4X4; mbmi->txfm_size = TX_4X4;
......
...@@ -384,10 +384,16 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, ...@@ -384,10 +384,16 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
} }
} }
} else if (mode == SPLITMV) { } else if (mode == SPLITMV) {
DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block) if (tx_size == TX_8X8) {
(xd->qcoeff, xd->block[0].dequant, vp8_dequant_idct_add_y_block_8x8_c(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer, xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs); xd->dst.y_stride, xd->eobs, xd);
} else {
DEQUANT_INVOKE(&pbi->dequant,
idct_add_y_block)(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
}
} else { } else {
BLOCKD *b = &xd->block[24]; BLOCKD *b = &xd->block[24];
...@@ -489,8 +495,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, ...@@ -489,8 +495,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (!xd->mode_info_context->mbmi.encoded_as_sb) { if (!xd->mode_info_context->mbmi.encoded_as_sb) {
#endif #endif
if ((tx_size == TX_8X8 && if ((tx_size == TX_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED) xd->mode_info_context->mbmi.mode != I8X8_PRED &&
|| tx_size == TX_16X16) xd->mode_info_context->mbmi.mode != SPLITMV)
|| tx_size == TX_16X16
)
DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) // DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) //
(xd->qcoeff + 16 * 16, xd->block[16].dequant, (xd->qcoeff + 16 * 16, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer, xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
......
...@@ -493,7 +493,8 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd, ...@@ -493,7 +493,8 @@ int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd,
INT16 *qcoeff_ptr = &xd->qcoeff[0]; INT16 *qcoeff_ptr = &xd->qcoeff[0];
TX_TYPE tx_type = DCT_DCT; TX_TYPE tx_type = DCT_DCT;
int bufthred = (xd->mode_info_context->mbmi.mode == I8X8_PRED) ? 16 : 24; int bufthred = (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) ? 16 : 24;
if (xd->mode_info_context->mbmi.mode != B_PRED && if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV && xd->mode_info_context->mbmi.mode != SPLITMV &&
xd->mode_info_context->mbmi.mode != I8X8_PRED) { xd->mode_info_context->mbmi.mode != I8X8_PRED) {
......
...@@ -1219,7 +1219,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) { ...@@ -1219,7 +1219,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
#if CONFIG_TX_SELECT #if CONFIG_TX_SELECT
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) || if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
(rf != INTRA_FRAME && mode != SPLITMV)) && (rf != INTRA_FRAME && !(mode == SPLITMV &&
mi->partitioning == PARTITIONING_4X4))) &&
pc->txfm_mode == TX_MODE_SELECT && pc->txfm_mode == TX_MODE_SELECT &&
!((pc->mb_no_coeff_skip && mi->mb_skip_coeff) || !((pc->mb_no_coeff_skip && mi->mb_skip_coeff) ||
(segfeature_active(xd, segment_id, SEG_LVL_EOB) && (segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
...@@ -1227,7 +1228,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) { ...@@ -1227,7 +1228,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
TX_SIZE sz = mi->txfm_size; TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged // FIXME(rbultje) code ternary symbol once all experiments are merged
vp8_write(bc, sz != TX_4X4, pc->prob_tx[0]); vp8_write(bc, sz != TX_4X4, pc->prob_tx[0]);
if (sz != TX_4X4 && mode != I8X8_PRED) if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV)
vp8_write(bc, sz != TX_8X8, pc->prob_tx[1]); vp8_write(bc, sz != TX_8X8, pc->prob_tx[1]);
} }
#endif #endif
......
...@@ -2161,7 +2161,9 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x, ...@@ -2161,7 +2161,9 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED && if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV) { mbmi->mode != SPLITMV) {
cpi->txfm_count[mbmi->txfm_size]++; cpi->txfm_count[mbmi->txfm_size]++;
} else if (mbmi->mode == I8X8_PRED) { } else if (mbmi->mode == I8X8_PRED ||
(mbmi->mode == SPLITMV &&
mbmi->partitioning != PARTITIONING_4X4)) {
cpi->txfm_count_8x8p[mbmi->txfm_size]++; cpi->txfm_count_8x8p[mbmi->txfm_size]++;
} }
} else } else
...@@ -2169,8 +2171,10 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x, ...@@ -2169,8 +2171,10 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED && if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) { mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
mbmi->txfm_size = TX_16X16; mbmi->txfm_size = TX_16X16;
} else if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV && } else if (mbmi->mode != B_PRED &&
cpi->common.txfm_mode >= ALLOW_8X8) { !(mbmi->mode == SPLITMV &&
mbmi->partitioning == PARTITIONING_4X4) &&
cpi->common.txfm_mode >= ALLOW_8X8) {
mbmi->txfm_size = TX_8X8; mbmi->txfm_size = TX_8X8;
} else { } else {
mbmi->txfm_size = TX_4X4; mbmi->txfm_size = TX_4X4;
......
...@@ -636,6 +636,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { ...@@ -636,6 +636,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ENTROPY_CONTEXT_PLANES t_above, t_left; ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta; ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl; ENTROPY_CONTEXT *tl;
int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
if (!x->e_mbd.above_context || !x->e_mbd.left_context) if (!x->e_mbd.above_context || !x->e_mbd.left_context)
return; return;
...@@ -645,7 +646,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { ...@@ -645,7 +646,7 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
ta = (ENTROPY_CONTEXT *)&t_above; ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left; tl = (ENTROPY_CONTEXT *)&t_left;
type = PLANE_TYPE_Y_NO_DC; type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b += 4) { for (b = 0; b < 16; b += 4) {
optimize_b(x, b, type, optimize_b(x, b, type,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b], ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
...@@ -655,8 +656,11 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { ...@@ -655,8 +656,11 @@ void vp8_optimize_mby_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
} }
// 8x8 blocks always have a 2nd order haar block // 8x8 blocks always have a 2nd order haar block
check_reset_8x8_2nd_coeffs(&x->e_mbd, if (has_2nd_order) {
ta + vp8_block2above_8x8[24], tl + vp8_block2left_8x8[24]); check_reset_8x8_2nd_coeffs(&x->e_mbd,
ta + vp8_block2above_8x8[24],
tl + vp8_block2left_8x8[24]);
}
} }
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) { void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
...@@ -896,11 +900,25 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { ...@@ -896,11 +900,25 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
optimize_mb_16x16(x, rtcd); optimize_mb_16x16(x, rtcd);
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd); vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), xd);
} else if (tx_size == TX_8X8) { } else if (tx_size == TX_8X8) {
vp8_transform_mb_8x8(x); if (xd->mode_info_context->mbmi.mode == SPLITMV) {
vp8_quantize_mb_8x8(x); assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
if (x->optimize) vp8_transform_mby_8x8(x);
optimize_mb_8x8(x, rtcd); vp8_transform_mbuv_4x4(x);
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd); vp8_quantize_mby_8x8(x);
vp8_quantize_mbuv_4x4(x);
if (x->optimize) {
vp8_optimize_mby_8x8(x, rtcd);
vp8_optimize_mbuv_4x4(x, rtcd);
}
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), xd);
vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), xd);
} else {
vp8_transform_mb_8x8(x);
vp8_quantize_mb_8x8(x);
if (x->optimize)
optimize_mb_8x8(x, rtcd);
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), xd);
}
} else { } else {
transform_mb_4x4(x); transform_mb_4x4(x);
vp8_quantize_mb_4x4(x); vp8_quantize_mb_4x4(x);
......
...@@ -366,10 +366,10 @@ typedef struct VP8_ENCODER_RTCD { ...@@ -366,10 +366,10 @@ typedef struct VP8_ENCODER_RTCD {
} VP8_ENCODER_RTCD; } VP8_ENCODER_RTCD;
enum { enum {
BLOCK_16X8, BLOCK_16X8 = PARTITIONING_16X8,
BLOCK_8X16, BLOCK_8X16 = PARTITIONING_8X16,
BLOCK_8X8, BLOCK_8X8 = PARTITIONING_8X8,
BLOCK_4X4, BLOCK_4X4 = PARTITIONING_4X4,
BLOCK_16X16, BLOCK_16X16,
BLOCK_MAX_SEGMENTS, BLOCK_MAX_SEGMENTS,
BLOCK_32X32 = BLOCK_MAX_SEGMENTS, BLOCK_32X32 = BLOCK_MAX_SEGMENTS,
......
This diff is collapsed.
...@@ -589,7 +589,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi, ...@@ -589,7 +589,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd); xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
break; break;
case TX_8X8: case TX_8X8:
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV)
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0); xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
else else
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd, has_y2_block); xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd, has_y2_block);
...@@ -665,7 +666,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi, ...@@ -665,7 +666,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]); *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]); *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
} }
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) { if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run); tokenize1st_order_chroma_4x4(xd, t, cpi, dry_run);
} else { } else {
for (b = 16; b < 24; b += 4) { for (b = 16; b < 24; b += 4) {
...@@ -1260,7 +1262,8 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) { ...@@ -1260,7 +1262,8 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
if (tx_size == TX_16X16) { if (tx_size == TX_16X16) {
vp8_stuff_mb_16x16(cpi, xd, t, dry_run); vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
} else if (tx_size == TX_8X8) { } else if (tx_size == TX_8X8) {
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) { if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run); vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
} else { } else {
vp8_stuff_mb_8x8(cpi, xd, t, dry_run); vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment