Commit c5af82b7 authored by Deb Mukherjee

Hybrid transform cleanups

Some cleanups that will make it easier to maintain the code
and to incorporate upcoming changes to entropy coding for the
hybrid transforms.

Change-Id: I44bdba368f7b8bf203161d7a6d3b1fc2c9e21a8f
parent 00f9eb65
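
The core of this cleanup is the new get_tx_type() helper in the first hunk, which folds the three per-config transform-type lookups into one place. As a rough sketch of how a call site can use it -- the dispatch wrapper below is illustrative and not part of this commit; get_tx_type() and vp8_ht_dequant_idct_add_8x8_c() appear in the diff, while the vp8_dequant_idct_add_8x8_c() fallback is assumed from the surrounding codebase:

/* Illustrative sketch only, not from this commit: pick between the
 * hybrid-transform and plain-DCT 8x8 inverse transform paths based on
 * the per-block transform type returned by the new helper. */
static void dequant_idct_add_8x8_dispatch(MACROBLOCKD *xd, BLOCKD *b,
                                          unsigned char *dst, int stride) {
  TX_TYPE tx_type = get_tx_type(xd, b);  /* DCT_DCT unless a hybrid
                                            transform is active */
  if (tx_type != DCT_DCT)
    vp8_ht_dequant_idct_add_8x8_c(tx_type, b->qcoeff, b->dequant,
                                  b->predictor, dst, 16, stride);
  else
    vp8_dequant_idct_add_8x8_c(b->qcoeff, b->dequant,
                               b->predictor, dst, 16, stride);
}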
@@ -493,7 +493,35 @@ static void txfm_map(BLOCKD *b, B_PREDICTION_MODE bmode) {
       break;
   }
 }
+
+static TX_TYPE get_tx_type(MACROBLOCKD *xd, BLOCKD *b) {
+  TX_TYPE tx_type = DCT_DCT;
+#if CONFIG_HYBRIDTRANSFORM16X16
+  if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) {
+    if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
+        xd->q_index < ACTIVE_HT16)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+#if CONFIG_HYBRIDTRANSFORM8X8
+  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
+    if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+#if CONFIG_HYBRIDTRANSFORM
+  if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
+    if (xd->mode_info_context->mbmi.mode == B_PRED &&
+        xd->q_index < ACTIVE_HT)
+      tx_type = b->bmi.as_mode.tx_type;
+    return tx_type;
+  }
+#endif
+  return tx_type;
+}
 #endif
 
 extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
 extern void vp8_setup_block_dptrs(MACROBLOCKD *xd);
@@ -40,10 +40,6 @@
 #include <stdio.h>
 
-#ifdef DEC_DEBUG
-int dec_debug = 0;
-#endif
-
 #define COEFCOUNT_TESTING
 
 static int merge_index(int v, int n, int modulus) {
@@ -209,16 +205,6 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
     }
 #endif
   }
-#ifdef DEC_DEBUG
-  if (dec_debug) {
-    int i, j;
-    printf("Generating predictors\n");
-    for (i = 0; i < 16; i++) {
-      for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
-      printf("\n");
-    }
-  }
-#endif
 }
@@ -228,7 +214,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
   int eobtotal = 0;
   MB_PREDICTION_MODE mode;
   int i;
-  int tx_type;
+  int tx_size;
 #if CONFIG_SUPERBLOCKS
   VP8_COMMON *pc = &pbi->common;
   int orig_skip_flag = xd->mode_info_context->mbmi.mb_skip_coeff;
@@ -255,14 +241,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
         xd->mode_info_context->mbmi.mode == NEARMV ||
         xd->mode_info_context->mbmi.mode == NEARESTMV)
       xd->mode_info_context->mbmi.txfm_size = TX_16X16;
-    else if (pbi->common.txfm_mode == ALLOW_8X8 &&
-             xd->mode_info_context->mbmi.mode != I8X8_PRED &&
-             xd->mode_info_context->mbmi.mode != B_PRED)
-#else
-    if (pbi->common.txfm_mode == ALLOW_8X8 &&
-        xd->mode_info_context->mbmi.mode != I8X8_PRED &&
-        xd->mode_info_context->mbmi.mode != B_PRED)
+    else
 #endif
+    if (pbi->common.txfm_mode == ALLOW_8X8 &&
+        xd->mode_info_context->mbmi.mode != B_PRED)
       xd->mode_info_context->mbmi.txfm_size = TX_8X8;
     else
       xd->mode_info_context->mbmi.txfm_size = TX_4X4;
@@ -272,37 +254,61 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
         xd->mode_info_context->mbmi.mode == NEWMV ||
         xd->mode_info_context->mbmi.mode == ZEROMV ||
         xd->mode_info_context->mbmi.mode == NEARMV ||
-        xd->mode_info_context->mbmi.mode == NEARESTMV) {
+        xd->mode_info_context->mbmi.mode == NEARESTMV)
       xd->mode_info_context->mbmi.txfm_size = TX_16X16;
-    } else if (pbi->common.txfm_mode == ALLOW_8X8 &&
-               xd->mode_info_context->mbmi.mode != I8X8_PRED &&
-               xd->mode_info_context->mbmi.mode != B_PRED &&
-               xd->mode_info_context->mbmi.mode != SPLITMV) {
-#else
+    else
+#endif
     if (pbi->common.txfm_mode == ALLOW_8X8 &&
         xd->mode_info_context->mbmi.mode != I8X8_PRED &&
         xd->mode_info_context->mbmi.mode != B_PRED &&
         xd->mode_info_context->mbmi.mode != SPLITMV) {
-#endif
       xd->mode_info_context->mbmi.txfm_size = TX_8X8;
-    }
-    else {
+    } else {
       xd->mode_info_context->mbmi.txfm_size = TX_4X4;
     }
   }
+
+#if CONFIG_HYBRIDTRANSFORM8X8
+  if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+    xd->mode_info_context->mbmi.txfm_size = TX_8X8;
+  }
+#endif
+
 #if CONFIG_SUPERBLOCKS
   if (xd->mode_info_context->mbmi.encoded_as_sb) {
     xd->mode_info_context->mbmi.txfm_size = TX_8X8;
   }
 #endif
 
-  tx_type = xd->mode_info_context->mbmi.txfm_size;
+  tx_size = xd->mode_info_context->mbmi.txfm_size;
+  mode = xd->mode_info_context->mbmi.mode;
+
+#if CONFIG_HYBRIDTRANSFORM
+  // parse transform types for intra 4x4 mode
+  QIndex = xd->q_index;
+  active_ht = (QIndex < ACTIVE_HT);
+  if (mode == B_PRED) {
+    for (i = 0; i < 16; i++) {
+      BLOCKD *b = &xd->block[i];
+      int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
+      if(active_ht)
+        txfm_map(b, b_mode);
+    } // loop over 4x4 blocks
+  }
+#endif
+
+#if CONFIG_HYBRIDTRANSFORM8X8
+  if (mode == I8X8_PRED) {
+    for (i = 0; i < 4; i++) {
+      int ib = vp8_i8x8_block[i];
+      BLOCKD *b = &xd->block[ib];
+      int i8x8mode = b->bmi.as_mode.first;
+      txfm_map(b, pred_mode_conv(i8x8mode));
+    }
+  }
+#endif
+
 #if CONFIG_HYBRIDTRANSFORM16X16
   active_ht16 = (QIndex < ACTIVE_HT16);
   if (mode < I8X8_PRED) {
     BLOCKD *b = &xd->block[0];
     if(active_ht16)
       txfm_map(b, pred_mode_conv(mode));
   }
 #endif
 
   if (xd->mode_info_context->mbmi.mb_skip_coeff) {
     vp8_reset_mb_tokens_context(xd);
@@ -321,27 +327,16 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
       xd->eobs[i] = 0;
     }
 
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
-    if (tx_type == TX_16X16)
+    if (tx_size == TX_16X16)
       eobtotal = vp8_decode_mb_tokens_16x16(pbi, xd);
     else
 #endif
-    if (tx_type == TX_8X8)
+    if (tx_size == TX_8X8)
       eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
     else
       eobtotal = vp8_decode_mb_tokens(pbi, xd);
-#ifdef DEC_DEBUG
-    if (dec_debug) {
-      printf("\nTokens (%d)\n", eobtotal);
-      for (i = 0; i < 400; i++) {
-        printf("%3d ", xd->qcoeff[i]);
-        if (i % 16 == 15) printf("\n");
-      }
-      printf("\n");
-    }
-#endif
   }
 
-  mode = xd->mode_info_context->mbmi.mode;
 #if CONFIG_SWITCHABLE_INTERP
   if (pbi->common.frame_type != KEY_FRAME)
     vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
@@ -366,39 +361,10 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
       }
     }
 
-#ifdef DEC_DEBUG
-  if (dec_debug) {
-    int i, j;
-    printf("Generating predictors\n");
-    for (i = 0; i < 16; i++) {
-      for (j = 0; j < 16; j++) printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
-      printf("\n");
-    }
-  }
-#endif
-
   // moved to be performed before detokenization
   //  if (xd->segmentation_enabled)
   //    mb_init_dequantizer(pbi, xd);
 
-#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
-  // parse transform types for intra 4x4 mode
-  QIndex = xd->q_index;
-  active_ht = (QIndex < ACTIVE_HT);
-  if (mode == B_PRED) {
-    for (i = 0; i < 16; i++) {
-      BLOCKD *b = &xd->block[i];
-      int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
-      if(active_ht)
-        txfm_map(b, b_mode);
-    } // loop over 4x4 blocks
-  }
-#endif
-
-#if CONFIG_HYBRIDTRANSFORM16X16
-  active_ht16 = (QIndex < ACTIVE_HT16);
-#endif
-
   /* do prediction */
   if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
 #if CONFIG_SUPERBLOCKS
@@ -451,9 +417,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
       unsigned char *pre = xd->block[ib].predictor;
       unsigned char *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
       int stride = xd->dst.y_stride;
-      tx_type = TX_4X4;
-      xd->mode_info_context->mbmi.txfm_size = TX_4X4;
-
 #endif
       b = &xd->block[ib];
@@ -462,7 +425,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
       (b, i8x8mode, b->predictor);
 
 #if CONFIG_HYBRIDTRANSFORM8X8
-      txfm_map(b, pred_mode_conv(i8x8mode));
       vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
                                     q, dq, pre, dst, 16, stride);
       q += 64;
@@ -546,7 +508,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
     BLOCKD *b = &xd->block[24];
 
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
-    if (tx_type == TX_16X16) {
+    if (tx_size == TX_16X16) {
 #if CONFIG_HYBRIDTRANSFORM16X16
       if (mode < I8X8_PRED && active_ht16) {
         BLOCKD *bd = &xd->block[0];
@@ -570,7 +532,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
     }
     else
 #endif
-    if (tx_type == TX_8X8) {
+    if (tx_size == TX_8X8) {
 #if CONFIG_SUPERBLOCKS
       void *orig = xd->mode_info_context;
       int n, num = xd->mode_info_context->mbmi.encoded_as_sb ? 4 : 1;
@@ -598,16 +560,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
           continue;  // only happens for SBs, which are already in dest buffer
 #endif
         DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
-#ifdef DEC_DEBUG
-        if (dec_debug) {
-          int j;
-          printf("DQcoeff Haar\n");
-          for (j = 0; j < 16; j++) {
-            printf("%d ", b->dqcoeff[j]);
-          }
-          printf("\n");
-        }
-#endif
         IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
         ((int *)b->qcoeff)[0] = 0;// 2nd order block are set to 0 after inverse transform
         ((int *)b->qcoeff)[1] = 0;
@@ -665,20 +617,22 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
 #if CONFIG_SUPERBLOCKS
   if (!xd->mode_info_context->mbmi.encoded_as_sb) {
 #endif
-    if (tx_type == TX_8X8
+    if (xd->mode_info_context->mbmi.mode != I8X8_PRED) {
+      if (tx_size == TX_8X8
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
-        || tx_type == TX_16X16
+          || tx_size == TX_16X16
 #endif
-        )
-      DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) //
-      (xd->qcoeff + 16 * 16, xd->block[16].dequant,
-       xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
-       xd->dst.uv_stride, xd->eobs + 16, xd); //
-    else if (xd->mode_info_context->mbmi.mode != I8X8_PRED)
-      DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block)
-      (xd->qcoeff + 16 * 16, xd->block[16].dequant,
-       xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
-       xd->dst.uv_stride, xd->eobs + 16);
+          )
+        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) //
+        (xd->qcoeff + 16 * 16, xd->block[16].dequant,
+         xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
+         xd->dst.uv_stride, xd->eobs + 16, xd); //
+      else
+        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block)
+        (xd->qcoeff + 16 * 16, xd->block[16].dequant,
+         xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
+         xd->dst.uv_stride, xd->eobs + 16);
+    }
 #if CONFIG_SUPERBLOCKS
   }
 #endif
@@ -753,10 +707,6 @@ decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd) {
         continue;
       }
 
-#ifdef DEC_DEBUG
-      dec_debug = (pc->current_video_frame == 0 && mb_row == 0 && mb_col == 0);
-#endif
-
       // Set above context pointer
       xd->above_context = pc->above_context + mb_col;
       xd->left_context = pc->left_context + (i >> 1);
@@ -1834,7 +1834,6 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
   else
 #endif
   if (cpi->common.txfm_mode == ALLOW_8X8
-      && mbmi->mode != I8X8_PRED
       && mbmi->mode != B_PRED) {
     mbmi->txfm_size = TX_8X8;
     cpi->t8x8_count++;
@@ -1931,7 +1930,6 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
   } else
 #endif
   if (cpi->common.txfm_mode == ALLOW_8X8
-      && mbmi->mode != I8X8_PRED
       && mbmi->mode != B_PRED
       && mbmi->mode != SPLITMV) {
     mbmi->txfm_size = TX_8X8;
@@ -2112,7 +2110,6 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
   /* test code: set transform size based on mode selection */
   if (cpi->common.txfm_mode == ALLOW_8X8
-      && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
       && x->e_mbd.mode_info_context->mbmi.mode != B_PRED
       && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
     x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
@@ -42,6 +42,8 @@ void vp8_stuff_mb(VP8_COMP *cpi,
                   MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 void vp8_stuff_mb_8x8(VP8_COMP *cpi,
                       MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
+void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
+                            MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
 void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
                         TOKENEXTRA **t, int dry_run);
@@ -764,6 +766,10 @@ int mb_is_skippable_8x8(MACROBLOCKD *xd) {
   return (mby_is_skippable_8x8(xd) & mbuv_is_skippable_8x8(xd));
 }
 
+int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd) {
+  return (mby_is_skippable_8x8(xd) & mbuv_is_skippable(xd));
+}
+
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
 int mby_is_skippable_16x16(MACROBLOCKD *xd) {
   int skip = 1;
@@ -822,7 +828,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       break;
 #endif
     case TX_8X8:
-      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd);
+      if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+        xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd);
+      else
+        xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd);
       break;
     default:
       xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
@@ -838,9 +847,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
     else
 #endif
-    if (tx_type == TX_8X8)
-      vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
-    else
+    if (tx_type == TX_8X8) {
+      if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+        vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
+      else
+        vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
+    } else
       vp8_stuff_mb(cpi, xd, t, dry_run);
   } else {
     vp8_fix_contexts(xd);
@@ -895,6 +907,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
   if (tx_type == TX_8X8) {
     ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
     ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
+#if CONFIG_HYBRIDTRANSFORM8X8
+    if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
+      plane_type = PLANE_TYPE_Y_WITH_DC;
+#endif
     for (b = 0; b < 16; b += 4) {
       tokenize1st_order_b_8x8(xd,
                               xd->block + b, t, plane_type, xd->frame_type,
@@ -904,21 +920,34 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
       *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
     }
-    for (b = 16; b < 24; b += 4) {
-      tokenize1st_order_b_8x8(xd,
-                              xd->block + b, t, 2, xd->frame_type,
-                              A + vp8_block2above_8x8[b],
-                              L + vp8_block2left_8x8[b],
-                              cpi, dry_run);
-      *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
-      *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+#if CONFIG_HYBRIDTRANSFORM8X8
+    if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
+      tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
+    } else
+#endif
+    {
+      for (b = 16; b < 24; b += 4) {
+        tokenize1st_order_b_8x8(xd,
+                                xd->block + b, t, 2, xd->frame_type,
+                                A + vp8_block2above_8x8[b],
+                                L + vp8_block2left_8x8[b],
+                                cpi, dry_run);
+        *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+        *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+      }
     }
   } else {
 #if CONFIG_HYBRIDTRANSFORM
     if(active_ht) {
       tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
-    } else {
+    } else
 #endif
+    {
       tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
     }
   }
+
+/*
 #if CONFIG_HYBRIDTRANSFORM8X8
   if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
     ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
@@ -946,6 +975,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
     tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
 #endif
   }
+*/
 
   if (dry_run)
     *t = t_backup;
 }
@@ -1341,7 +1371,6 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
     *t = t_backup;
 }
 
-
 #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
 static __inline
 void stuff1st_order_b_16x16(const BLOCKD *const b,
@@ -1493,6 +1522,40 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
   if (dry_run)
     *t = t_backup;
 }
+
+void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
+                            MACROBLOCKD *xd,
+                            TOKENEXTRA **t,
+                            int dry_run) {
+  ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
+  ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
+  int plane_type;
+  int b;
+  TOKENEXTRA *t_backup = *t;
+
+  stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
+                       A + vp8_block2above_8x8[24],
+                       L + vp8_block2left_8x8[24], cpi, dry_run);
+  plane_type = 0;
+
+  for (b = 0; b < 16; b += 4) {
+    stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type,
+                         A + vp8_block2above_8x8[b],
+                         L + vp8_block2left_8x8[b],
+                         cpi, dry_run);
+    *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
+    *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+  }
+
+  for (b = 16; b < 24; b++)
+    stuff1st_order_buv(t,
+                       A + vp8_block2above[b],
+                       L + vp8_block2left[b],
+                       cpi, dry_run);
+
+  if (dry_run)
+    *t = t_backup;
+}
 
 void vp8_fix_contexts(MACROBLOCKD *xd) {
   /* Clear entropy contexts for Y2 blocks */
   if ((xd->mode_info_context->mbmi.mode != B_PRED