Commit d406334f authored by Deb Mukherjee

Cleanups for hybrid transform

Some cleanups on the transform size and type selection logic.

Change-Id: If2e9675459482242cf83b4f7de7634505e3f6dac
parent d4c329c4
......@@ -479,6 +479,33 @@ static void txfm_map(BLOCKD *b, B_PREDICTION_MODE bmode) {
break;
}
}
/*
 * Returns the transform type to use for block b of the current
 * macroblock, selected from the macroblock's transform size, its
 * prediction mode and (where the hybrid transform is gated on rate)
 * the quantizer index.  Falls back to DCT_DCT whenever no hybrid
 * transform applies.
 */
static TX_TYPE get_tx_type(MACROBLOCKD *xd, BLOCKD *b) {
  TX_TYPE tx_type = DCT_DCT;
#if CONFIG_HYBRIDTRANSFORM16X16
  if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) {
    /* Hybrid 16x16 only for intra modes below I8X8_PRED at low QIndex. */
    if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
        xd->q_index < ACTIVE_HT16)
      tx_type = b->bmi.as_mode.tx_type;
    return tx_type;
  }
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
    /* Hybrid 8x8 only for the I8X8_PRED intra mode. */
    if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
      tx_type = b->bmi.as_mode.tx_type;
    return tx_type;
  }
#endif
#if CONFIG_HYBRIDTRANSFORM
  if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
    /* Hybrid 4x4 only for B_PRED at low QIndex. */
    if (xd->mode_info_context->mbmi.mode == B_PRED &&
        xd->q_index < ACTIVE_HT)
      tx_type = b->bmi.as_mode.tx_type;
    return tx_type;
  }
#endif
  /* Fix: previously control could fall off the end of this non-void
   * function (undefined behavior) when no branch matched, e.g. with
   * all three hybrid-transform configs disabled or an unmatched
   * txfm_size.  Always return the DCT_DCT default instead. */
  return tx_type;
}
#endif
extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
......@@ -488,8 +515,8 @@ static void update_blockd_bmi(MACROBLOCKD *xd) {
int i;
int is_4x4;
is_4x4 = (xd->mode_info_context->mbmi.mode == SPLITMV) ||
(xd->mode_info_context->mbmi.mode == I8X8_PRED) ||
(xd->mode_info_context->mbmi.mode == B_PRED);
(xd->mode_info_context->mbmi.mode == I8X8_PRED) ||
(xd->mode_info_context->mbmi.mode == B_PRED);
if (is_4x4) {
for (i = 0; i < 16; i++) {
......
......@@ -40,10 +40,6 @@
#include <stdio.h>
#ifdef DEC_DEBUG
int dec_debug = 0;
#endif
#define COEFCOUNT_TESTING
static int merge_index(int v, int n, int modulus) {
......@@ -209,17 +205,6 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
}
#endif
}
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
printf("\n");
}
}
#endif
}
extern const int vp8_i8x8_block[4];
......@@ -255,14 +240,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode == NEARMV ||
xd->mode_info_context->mbmi.mode == NEARESTMV)
xd->mode_info_context->mbmi.txfm_size = TX_16X16;
else if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != B_PRED)
#else
if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != B_PRED)
else
#endif
if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != B_PRED)
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
else
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
......@@ -272,30 +253,18 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode == NEWMV ||
xd->mode_info_context->mbmi.mode == ZEROMV ||
xd->mode_info_context->mbmi.mode == NEARMV ||
xd->mode_info_context->mbmi.mode == NEARESTMV) {
xd->mode_info_context->mbmi.mode == NEARESTMV)
xd->mode_info_context->mbmi.txfm_size = TX_16X16;
} else if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV) {
#else
if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV) {
else
#endif
if (pbi->common.txfm_mode == ALLOW_8X8 &&
xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV)
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
}
else {
else
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
}
}
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
}
#endif
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
......@@ -303,6 +272,41 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
#endif
tx_type = xd->mode_info_context->mbmi.txfm_size;
mode = xd->mode_info_context->mbmi.mode;
#if CONFIG_HYBRIDTRANSFORM
// parse transform types for intra 4x4 mode
QIndex = xd->q_index;
active_ht = (QIndex < ACTIVE_HT);
if (mode == B_PRED) {
for (i = 0; i < 16; i++) {
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
if(active_ht)
txfm_map(b, b_mode);
} // loop over 4x4 blocks
}
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
if (mode == I8X8_PRED) {
for (i = 0; i < 4; i++) {
int ib = vp8_i8x8_block[i];
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
txfm_map(b, pred_mode_conv(i8x8mode));
}
}
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
active_ht16 = (QIndex < ACTIVE_HT16);
if (mode < I8X8_PRED) {
BLOCKD *b = &xd->block[0];
if(active_ht16)
txfm_map(b, pred_mode_conv(mode));
}
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
vp8_reset_mb_tokens_context(xd);
......@@ -329,19 +333,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
else
eobtotal = vp8_decode_mb_tokens(pbi, xd);
#ifdef DEC_DEBUG
if (dec_debug) {
printf("\nTokens (%d)\n", eobtotal);
for (i = 0; i < 400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
}
#endif
}
mode = xd->mode_info_context->mbmi.mode;
//mode = xd->mode_info_context->mbmi.mode;
#if CONFIG_SWITCHABLE_INTERP
if (pbi->common.frame_type != KEY_FRAME)
vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
......@@ -366,39 +360,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
}
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
printf("\n");
}
}
#endif
// moved to be performed before detokenization
// if (xd->segmentation_enabled)
// mb_init_dequantizer(pbi, xd);
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
// parse transform types for intra 4x4 mode
QIndex = xd->q_index;
active_ht = (QIndex < ACTIVE_HT);
if (mode == B_PRED) {
for (i = 0; i < 16; i++) {
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
if(active_ht)
txfm_map(b, b_mode);
} // loop over 4x4 blocks
}
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
active_ht16 = (QIndex < ACTIVE_HT16);
#endif
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
......@@ -451,9 +416,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
unsigned char *pre = xd->block[ib].predictor;
unsigned char *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
int stride = xd->dst.y_stride;
tx_type = TX_4X4;
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
#endif
b = &xd->block[ib];
......@@ -462,7 +424,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
(b, i8x8mode, b->predictor);
#if CONFIG_HYBRIDTRANSFORM8X8
txfm_map(b, pred_mode_conv(i8x8mode));
vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
q, dq, pre, dst, 16, stride);
q += 64;
......@@ -598,16 +559,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
continue; // only happens for SBs, which are already in dest buffer
#endif
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
#ifdef DEC_DEBUG
if (dec_debug) {
int j;
printf("DQcoeff Haar\n");
for (j = 0; j < 16; j++) {
printf("%d ", b->dqcoeff[j]);
}
printf("\n");
}
#endif
IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
((int *)b->qcoeff)[0] = 0;// 2nd order block are set to 0 after inverse transform
((int *)b->qcoeff)[1] = 0;
......@@ -665,7 +616,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb) {
#endif
if (tx_type == TX_8X8
if ((tx_type == TX_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| tx_type == TX_16X16
#endif
......@@ -753,10 +705,6 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd) {
continue;
}
#ifdef DEC_DEBUG
dec_debug = (pc->current_video_frame == 0 && mb_row == 0 && mb_col == 0);
#endif
// Set above context pointer
xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context + (i >> 1);
......
......@@ -1833,7 +1833,6 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
&& mbmi->mode != I8X8_PRED
&& mbmi->mode != B_PRED) {
mbmi->txfm_size = TX_8X8;
cpi->t8x8_count++;
......@@ -1930,7 +1929,6 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
} else
#endif
if (cpi->common.txfm_mode == ALLOW_8X8
&& mbmi->mode != I8X8_PRED
&& mbmi->mode != B_PRED
&& mbmi->mode != SPLITMV) {
mbmi->txfm_size = TX_8X8;
......@@ -2111,7 +2109,6 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
/* test code: set transform size based on mode selection */
if (cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
......
......@@ -912,7 +912,7 @@ static void super_block_yrd_8x8(MACROBLOCK *x,
xd->above_context = ta + x_idx;
xd->left_context = tl + y_idx;
r += vp8_rdcost_mby_8x8(x, 0);
skippable = skippable && mby_is_skippable_8x8(xd);
skippable = skippable && mby_is_skippable_8x8(xd, 1);
}
*distortion = (d >> 2);
......@@ -1271,7 +1271,7 @@ static int64_t rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
#if CONFIG_TX16X16
skip = mby_is_skippable_16x16(xd);
#else
skip = mby_is_skippable_8x8(xd);
skip = mby_is_skippable_8x8(xd, 1);
#endif
mode_selected = mode;
#if CONFIG_COMP_INTRA_PRED
......@@ -3718,7 +3718,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#if CONFIG_TX16X16
mb_skippable = mb_is_skippable_16x16(&x->e_mbd);
#else
mb_skippable = mb_is_skippable_8x8(&x->e_mbd);
mb_skippable = mb_is_skippable_8x8(&x->e_mbd, has_y2);
#endif
} else {
#if CONFIG_TX16X16
......@@ -3726,7 +3726,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
& mby_is_skippable_16x16(&x->e_mbd);
#else
mb_skippable = uv_intra_skippable_8x8
& mby_is_skippable_8x8(&x->e_mbd);
& mby_is_skippable_8x8(&x->e_mbd, has_y2);
#endif
}
} else {
......
......@@ -42,6 +42,8 @@ void vp8_stuff_mb(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
......@@ -746,13 +748,18 @@ int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
mbuv_is_skippable(xd));
}
/*
 * Returns 1 if the luma (Y) plane of an 8x8-transformed macroblock
 * carries no coded coefficients and can therefore be skipped.
 *
 * has_y2_block: nonzero when a second-order (Y2) block is present.
 *   In that case each 8x8 Y block may still hold a DC coefficient
 *   (eob < 2) because the DC terms live in block 24, which must
 *   itself be empty.  Without a Y2 block the Y blocks must be
 *   entirely empty (eob == 0).
 *
 * Fix: the scraped span interleaved the old one-argument definition
 * with the new two-argument one (duplicate signature and duplicated
 * loop body), which is not valid C; this is the post-change function.
 */
int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
  int skip = 1;
  int i = 0;

  if (has_y2_block) {
    for (i = 0; i < 16; i += 4)
      skip &= (xd->block[i].eob < 2);
    skip &= (!xd->block[24].eob);
  } else {
    for (i = 0; i < 16; i += 4)
      skip &= (!xd->block[i].eob);
  }
  return skip;
}
......@@ -760,8 +767,14 @@ int mbuv_is_skippable_8x8(MACROBLOCKD *xd) {
return (!xd->block[16].eob) & (!xd->block[20].eob);
}
/*
 * Whole-macroblock skip test for the 8x8 transform: both the luma
 * plane and the 8x8-transformed chroma plane must be empty.
 *
 * Fix: the scraped span fused the old one-argument definition with
 * the new two-argument one (duplicate definitions), which is not
 * valid C; these are the post-change functions.
 */
int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
  return (mby_is_skippable_8x8(xd, has_y2_block) &
          mbuv_is_skippable_8x8(xd));
}

/*
 * Variant for macroblocks (I8X8_PRED) that use the 8x8 transform on
 * luma but the 4x4 transform on chroma.
 */
int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
  return (mby_is_skippable_8x8(xd, has_y2_block) &
          mbuv_is_skippable(xd));
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
......@@ -822,8 +835,14 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
break;
#endif
case TX_8X8:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd);
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
else
#endif
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd, has_y2_block);
break;
default:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
break;
......@@ -838,9 +857,14 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
else
#endif
if (tx_type == TX_8X8)
vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
else
if (tx_type == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
else
#endif
vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
} else
vp8_stuff_mb(cpi, xd, t, dry_run);
} else {
vp8_fix_contexts(xd);
......@@ -895,6 +919,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (tx_type == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
plane_type = PLANE_TYPE_Y_WITH_DC;
}
#endif
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
xd->block + b, t, plane_type, xd->frame_type,
......@@ -904,48 +933,31 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(xd,
xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
} else
#endif
{
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(xd,
xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
}
} else {
#if CONFIG_HYBRIDTRANSFORM
if(active_ht) {
if (active_ht)
tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
} else {
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(xd,
xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
} else {
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
}
#else
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
#endif
}
#else
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
else
#endif
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
}
if (dry_run)
*t = t_backup;
}
......@@ -1341,7 +1353,6 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
*t = t_backup;
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static __inline
void stuff1st_order_b_16x16(const BLOCKD *const b,
......@@ -1476,7 +1487,6 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
A + vp8_block2above[24],
L + vp8_block2left[24],
cpi, dry_run);
plane_type = 0;
for (b = 0; b < 16; b++)
stuff1st_order_b(t,
......@@ -1493,6 +1503,41 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
if (dry_run)
*t = t_backup;
}
/*
 * Emits end-of-block "stuff" tokens for a macroblock coded with the
 * 8x8 transform on luma but the 4x4 transform on chroma (the
 * I8X8_PRED case).  When dry_run is nonzero the token pointer is
 * restored afterwards so only the entropy contexts are updated.
 */
void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
                            MACROBLOCKD *xd,
                            TOKENEXTRA **t,
                            int dry_run) {
  ENTROPY_CONTEXT *const above = (ENTROPY_CONTEXT *)xd->above_context;
  ENTROPY_CONTEXT *const left = (ENTROPY_CONTEXT *)xd->left_context;
  TOKENEXTRA *const saved = *t;
  const int plane_type = 3;  /* Y without DC (DC carried by the Y2 block) */
  int blk;

  /* Second-order (Y2) block, index 24. */
  stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
                       above + vp8_block2above_8x8[24],
                       left + vp8_block2left_8x8[24], cpi, dry_run);

  /* Luma: four 8x8 blocks; each spans two 4x4 context slots, so the
   * neighbouring slot is mirrored after every block. */
  for (blk = 0; blk < 16; blk += 4) {
    stuff1st_order_b_8x8(xd->block + blk, t, plane_type, xd->frame_type,
                         above + vp8_block2above_8x8[blk],
                         left + vp8_block2left_8x8[blk],
                         cpi, dry_run);
    above[vp8_block2above_8x8[blk] + 1] = above[vp8_block2above_8x8[blk]];
    left[vp8_block2left_8x8[blk] + 1] = left[vp8_block2left_8x8[blk]];
  }

  /* Chroma: eight 4x4 blocks, stuffed with the 4x4 UV routine. */
  for (blk = 16; blk < 24; blk++)
    stuff1st_order_buv(t,
                       above + vp8_block2above[blk],
                       left + vp8_block2left[blk],
                       cpi, dry_run);

  if (dry_run)
    *t = saved;
}
void vp8_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((xd->mode_info_context->mbmi.mode != B_PRED
......
......@@ -34,9 +34,10 @@ int rd_cost_mby(MACROBLOCKD *);
extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable(MACROBLOCKD *xd);
extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *xd);
extern int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block);
extern int mb_is_skippable_16x16(MACROBLOCKD *xd);
extern int mby_is_skippable_16x16(MACROBLOCKD *xd);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment