Commit f1046563 authored by Deb Mukherjee

Merging the hybrid transform experiments

This folds the hybridtransform, hybridtransform8x8 and hybridtransform16x16
experiments into the mainline build: the three flags are dropped from
EXPERIMENT_LIST in configure, and the CONFIG_HYBRIDTRANSFORM* #if/#endif
guards are removed so the ADST/DCT hybrid transform code paths are always
compiled.

Change-Id: I99f1982b30a630a9a070a8326d83b34a33cba14c
parent 6e5cb128
@@ -221,12 +221,9 @@ EXPERIMENT_LIST="
superblocks
pred_filter
lossless
hybridtransform
hybridtransform8x8
switchable_interp
newbestrefmv
new_mvref
hybridtransform16x16
newmventropy
tx_select
"
@@ -135,14 +135,12 @@ typedef enum {
TX_SIZE_MAX // Number of different transforms available
} TX_SIZE;
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
typedef enum {
DCT_DCT = 0, // DCT in both horizontal and vertical
ADST_DCT = 1, // ADST in horizontal, DCT in vertical
DCT_ADST = 2, // DCT in horizontal, ADST in vertical
ADST_ADST = 3 // ADST in both directions
} TX_TYPE;
#endif
#define VP8_YMODES (B_PRED + 1)
#define VP8_UV_MODES (TM_PRED + 1)
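For illustration only: each TX_TYPE above combines a per-direction choice of DCT or ADST, so a value can be decomposed as below. The helper names are hypothetical (not part of this change) and simply follow the enum comments (ADST_DCT = ADST horizontally, DCT vertically).
// Hypothetical helpers: split a 2-D TX_TYPE into its 1-D per-direction choices.
static int tx_uses_adst_horizontally(TX_TYPE t) {
  return t == ADST_DCT || t == ADST_ADST;
}
static int tx_uses_adst_vertically(TX_TYPE t) {
  return t == DCT_ADST || t == ADST_ADST;
}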
@@ -184,9 +182,7 @@ typedef enum {
union b_mode_info {
struct {
B_PREDICTION_MODE first;
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
#endif
#if CONFIG_COMP_INTRA_PRED
B_PREDICTION_MODE second;
@@ -388,17 +384,11 @@ typedef struct MacroBlockD {
} MACROBLOCKD;
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
#define ACTIVE_HT 110 // quantization stepsize threshold
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
#define ACTIVE_HT8 300
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
#define ACTIVE_HT16 300
#endif
// convert MB_PREDICTION_MODE to B_PREDICTION_MODE
static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
@@ -442,7 +432,6 @@ static B_PREDICTION_MODE pred_mode_conv(MB_PREDICTION_MODE mode) {
return b_mode;
}
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
// transform mapping
static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
// map transform type
@@ -470,9 +459,7 @@ static TX_TYPE txfm_map(B_PREDICTION_MODE bmode) {
}
return tx_type;
}
#endif
#if CONFIG_HYBRIDTRANSFORM
static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode == B_PRED &&
@@ -481,9 +468,7 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, const BLOCKD *b) {
}
return tx_type;
}
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
@@ -492,9 +477,7 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, const BLOCKD *b) {
}
return tx_type;
}
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
@@ -503,34 +486,24 @@ static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, const BLOCKD *b) {
}
return tx_type;
}
#endif
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \
CONFIG_HYBRIDTRANSFORM16X16
static TX_TYPE get_tx_type(const MACROBLOCKD *xd, const BLOCKD *b) {
TX_TYPE tx_type = DCT_DCT;
int ib = (b - xd->block);
if (ib >= 16)
return tx_type;
#if CONFIG_HYBRIDTRANSFORM16X16
if (xd->mode_info_context->mbmi.txfm_size == TX_16X16) {
tx_type = get_tx_type_16x16(xd, b);
}
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
ib = (ib & 8) + ((ib & 4) >> 1);
tx_type = get_tx_type_8x8(xd, &xd->block[ib]);
}
#endif
#if CONFIG_HYBRIDTRANSFORM
if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
tx_type = get_tx_type_4x4(xd, b);
}
#endif
return tx_type;
}
#endif
extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
extern void vp8_setup_block_dptrs(MACROBLOCKD *xd);
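Usage illustration (mirroring the decoder change later in this commit): a caller asks get_tx_type() for the block's transform type and takes the hybrid inverse-transform path only when it is not DCT_DCT. The wrapper name below is hypothetical; the two dequant/idct calls are the ones used in decode_macroblock further down.
// Sketch only: dispatch a 4x4 block on its per-block transform type.
static void dequant_idct_add_4x4_sketch(MACROBLOCKD *xd, BLOCKD *b) {
  TX_TYPE tx_type = get_tx_type(xd, b);
  if (tx_type != DCT_DCT) {
    // hybrid ADST/DCT inverse transform, then add to the predictor
    vp8_ht_dequant_idct_add_c(tx_type, b->qcoeff, b->dequant, b->predictor,
                              *(b->base_dst) + b->dst, 16, b->dst_stride);
  } else {
    // regular 4x4 inverse DCT, then add to the predictor
    vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor,
                           *(b->base_dst) + b->dst, 16, b->dst_stride);
  }
}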
@@ -13,9 +13,9 @@
static const vp8_prob default_coef_probs [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] = {
{
/* Block Type ( 0 ) */
{
@@ -254,11 +254,10 @@ static const vp8_prob default_coef_probs [BLOCK_TYPES]
}
};
#if CONFIG_HYBRIDTRANSFORM
static const vp8_prob default_hybrid_coef_probs [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] = {
{
/* Block Type ( 0 ) */
{
@@ -496,7 +495,6 @@ static const vp8_prob default_hybrid_coef_probs [BLOCK_TYPES]
}
}
};
#endif
static const vp8_prob
default_coef_probs_8x8[BLOCK_TYPES_8X8]
@@ -731,12 +729,11 @@ default_coef_probs_8x8[BLOCK_TYPES_8X8]
}
};
#if CONFIG_HYBRIDTRANSFORM8X8
static const vp8_prob
default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] = {
{
/* block Type 0 */
{
@@ -964,7 +961,6 @@ default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
}
}
};
#endif
static const vp8_prob
default_coef_probs_16x16[BLOCK_TYPES_16X16]
@@ -1173,7 +1169,6 @@ static const vp8_prob
}
};
#if CONFIG_HYBRIDTRANSFORM16X16
static const vp8_prob
default_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS]
@@ -1380,4 +1375,3 @@ static const vp8_prob
}
}
};
#endif
@@ -64,8 +64,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = {
7, 11, 14, 15,
};
#if CONFIG_HYBRIDTRANSFORM
DECLARE_ALIGNED(16, const int, vp8_col_scan[16]) = {
0, 4, 8, 12,
1, 5, 9, 13,
@@ -78,7 +76,6 @@ DECLARE_ALIGNED(16, const int, vp8_row_scan[16]) = {
8, 9, 10, 11,
12, 13, 14, 15
};
#endif
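These scans are consumed like vp8_default_zig_zag1d: the (de)tokenizer walks the 16 coefficients of a 4x4 block in the chosen order. Which TX_TYPE selects the row or column scan is decided in the tokenizer and is not visible in this hunk; the helper below is only a sketch of how such a table is used, with a hypothetical name.
// Sketch: last nonzero position (EOB) of a 4x4 block in a given scan order.
static int eob_in_scan_order(const short *qcoeff, const int *scan) {
  int i, eob = 0;
  for (i = 0; i < 16; i++)
    if (qcoeff[scan[i]])
      eob = i + 1;
  return eob;
}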
DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
@@ -208,25 +205,19 @@ vp8_extra_bit_struct vp8_extra_bits[12] = {
void vp8_default_coef_probs(VP8_COMMON *pc) {
vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
sizeof(pc->fc.coef_probs));
#if CONFIG_HYBRIDTRANSFORM
vpx_memcpy(pc->fc.hybrid_coef_probs, default_hybrid_coef_probs,
sizeof(pc->fc.hybrid_coef_probs));
#endif
vpx_memcpy(pc->fc.coef_probs_8x8, default_coef_probs_8x8,
sizeof(pc->fc.coef_probs_8x8));
#if CONFIG_HYBRIDTRANSFORM8X8
vpx_memcpy(pc->fc.hybrid_coef_probs_8x8, default_hybrid_coef_probs_8x8,
sizeof(pc->fc.hybrid_coef_probs_8x8));
#endif
vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
sizeof(pc->fc.coef_probs_16x16));
#if CONFIG_HYBRIDTRANSFORM16X16
vpx_memcpy(pc->fc.hybrid_coef_probs_16x16,
default_hybrid_coef_probs_16x16,
sizeof(pc->fc.hybrid_coef_probs_16x16));
#endif
}
void vp8_coef_tree_initialize() {
@@ -344,7 +335,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
}
#if CONFIG_HYBRIDTRANSFORM
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -366,7 +356,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
else cm->fc.hybrid_coef_probs[i][j][k][t] = prob;
}
}
#endif
for (i = 0; i < BLOCK_TYPES_8X8; ++i)
for (j = 0; j < COEF_BANDS; ++j)
@@ -390,7 +379,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
}
#if CONFIG_HYBRIDTRANSFORM8X8
for (i = 0; i < BLOCK_TYPES_8X8; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -413,7 +401,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
else cm->fc.hybrid_coef_probs_8x8[i][j][k][t] = prob;
}
}
#endif
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
@@ -437,7 +424,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
}
#if CONFIG_HYBRIDTRANSFORM16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@@ -458,5 +444,4 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
else cm->fc.hybrid_coef_probs_16x16[i][j][k][t] = prob;
}
}
#endif
}
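The loops above adapt the hybrid_coef_probs tables the same way as the regular ones: each probability is blended with the probability implied by this frame's counts and clamped to [1, 255]. Below is a self-contained sketch of that blend; the update factor and the count-to-probability mapping are assumptions, not the exact code elided from these hunks.
// Sketch of one probability-adaptation step: weighted average of the
// previous probability and an estimate from the branch counts (ct0 = count
// of the 0 branch, ct1 = count of the 1 branch), clamped to [1, 255].
static vp8_prob adapt_prob_sketch(vp8_prob pre_prob, unsigned int ct0,
                                  unsigned int ct1, int update_factor) {
  unsigned int den = ct0 + ct1;
  int count_prob = den ? (int)((255ULL * ct0 + den / 2) / den) : 128;
  int prob = ((int)pre_prob * (256 - update_factor) +
              count_prob * update_factor + 128) >> 8;
  if (prob < 1) return 1;
  if (prob > 255) return 255;
  return (vp8_prob)prob;
}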
@@ -104,10 +104,8 @@ struct VP8Common;
void vp8_default_coef_probs(struct VP8Common *);
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
#if CONFIG_HYBRIDTRANSFORM
extern DECLARE_ALIGNED(16, const int, vp8_col_scan[16]);
extern DECLARE_ALIGNED(16, const int, vp8_row_scan[16]);
#endif
extern short vp8_default_zig_zag_mask[16];
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
@@ -109,12 +109,9 @@ extern prototype_second_order(vp8_short_inv_walsh4x4_lossless_c);
extern prototype_second_order(vp8_short_inv_walsh4x4_1_lossless_c);
#endif
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
#include "vp8/common/blockd.h"
void vp8_ihtllm_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
#endif
typedef prototype_idct((*vp8_idct_fn_t));
typedef prototype_idct_scalar_add((*vp8_idct_scalar_add_fn_t));
@@ -26,9 +26,7 @@
#include "vp8/common/idct.h"
#include "vp8/common/systemdependent.h"
#if CONFIG_HYBRIDTRANSFORM
#include "vp8/common/blockd.h"
#endif
#include <math.h>
@@ -38,7 +36,6 @@ static const int rounding = 0;
// TODO: these transforms can be further converted into integer forms
// for complexity optimization
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
float idct_4[16] = {
0.500000000000000, 0.653281482438188, 0.500000000000000, 0.270598050073099,
0.500000000000000, 0.270598050073099, -0.500000000000000, -0.653281482438188,
@@ -90,9 +87,7 @@ float iadst_8[64] = {
0.483002021635509, -0.466553967085785, 0.434217976756762, -0.387095214016348,
0.326790388032145, -0.255357107325375, 0.175227946595736, -0.089131608307532
};
#endif
#if CONFIG_HYBRIDTRANSFORM16X16 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8
float idct_16[256] = {
0.250000, 0.351851, 0.346760, 0.338330, 0.326641, 0.311806, 0.293969, 0.273300,
0.250000, 0.224292, 0.196424, 0.166664, 0.135299, 0.102631, 0.068975, 0.034654,
@@ -162,9 +157,7 @@ float iadst_16[256] = {
0.347761, -0.344612, 0.338341, -0.329007, 0.316693, -0.301511, 0.283599, -0.263118,
0.240255, -0.215215, 0.188227, -0.159534, 0.129396, -0.098087, 0.065889, -0.033094
};
#endif
#if CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
void vp8_ihtllm_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim) {
@@ -289,7 +282,6 @@ void vp8_ihtllm_c(short *input, short *output, int pitch,
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
#endif
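vp8_ihtllm_c applies the hybrid transform separably, picking a DCT or ADST basis per direction from the float tables above. The 4x4 sketch below illustrates that structure only: it assumes an iadst_4[16] table defined alongside idct_4, ignores the pitch argument, and its pass order and rounding are simplifications of the real routine.
// Simplified separable 4x4 hybrid inverse transform:
//   out = B_v * in * B_h^T, with B_v/B_h chosen per the TX_TYPE comments
//   in blockd.h (ADST_DCT = ADST horizontally, DCT vertically).
static void iht4x4_sketch(const short *in, short *out, TX_TYPE tx_type) {
  const float *col_basis =  /* vertical 1-D transform */
      (tx_type == DCT_ADST || tx_type == ADST_ADST) ? iadst_4 : idct_4;
  const float *row_basis =  /* horizontal 1-D transform */
      (tx_type == ADST_DCT || tx_type == ADST_ADST) ? iadst_4 : idct_4;
  float tmp[16];
  int i, j, k;
  for (j = 0; j < 4; j++)        // column (vertical) pass
    for (i = 0; i < 4; i++) {
      float s = 0;
      for (k = 0; k < 4; k++)
        s += col_basis[i * 4 + k] * in[k * 4 + j];
      tmp[i * 4 + j] = s;
    }
  for (i = 0; i < 4; i++)        // row (horizontal) pass
    for (j = 0; j < 4; j++) {
      float s = 0;
      for (k = 0; k < 4; k++)
        s += tmp[i * 4 + k] * row_basis[j * 4 + k];
      out[i * 4 + j] = (short)(s < 0 ? s - 0.5f : s + 0.5f);
    }
}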
void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) {
int i;
@@ -51,17 +51,11 @@ typedef struct frame_contexts {
vp8_prob sub_mv_ref_prob [SUBMVREF_COUNT][VP8_SUBMVREFS - 1];
vp8_prob mbsplit_prob [VP8_NUMMBSPLITS - 1];
vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM
vp8_prob hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
vp8_prob coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
@@ -87,45 +81,33 @@ typedef struct frame_contexts {
vp8_prob pre_coef_probs [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM
vp8_prob pre_hybrid_coef_probs [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
vp8_prob pre_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob pre_hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
vp8_prob pre_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
unsigned int coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM8X8
unsigned int hybrid_coef_counts_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
@@ -208,10 +208,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
MB_PREDICTION_MODE mode;
int i;
int tx_size;
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || \
CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type;
#endif
#if CONFIG_SUPERBLOCKS
VP8_COMMON *pc = &pbi->common;
int orig_skip_flag = xd->mode_info_context->mbmi.mb_skip_coeff;
@@ -330,7 +327,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
vp8_intra8x8_predict(b, i8x8mode, b->predictor);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
#if CONFIG_HYBRIDTRANSFORM8X8
tx_type = get_tx_type(xd, &xd->block[idx]);
if (tx_type != DCT_DCT) {
vp8_ht_dequant_idct_add_8x8_c(tx_type,
@@ -338,9 +334,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
} else {
vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
}
#else
vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
#endif
q += 64;
} else {
for (j = 0; j < 4; j++) {
@@ -380,7 +373,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
#endif
#if CONFIG_HYBRIDTRANSFORM
tx_type = get_tx_type(xd, b);
if (tx_type != DCT_DCT) {
vp8_ht_dequant_idct_add_c(tx_type, b->qcoeff,
@@ -390,18 +382,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
}
#else
if (xd->eobs[i] > 1) {
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
} else {
IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
(b->qcoeff[0] * b->dequant[0], b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
((int *)b->qcoeff)[0] = 0;
}
#endif
}
} else if (mode == SPLITMV) {
DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block)
@@ -412,7 +392,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
BLOCKD *b = &xd->block[24];
if (tx_size == TX_16X16) {
#if CONFIG_HYBRIDTRANSFORM16X16
BLOCKD *bd = &xd->block[0];
tx_type = get_tx_type(xd, bd);
if (tx_type != DCT_DCT) {
@@ -424,11 +403,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride);
}
#else
vp8_dequant_idct_add_16x16_c(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride);
#endif
} else if (tx_size == TX_8X8) {
#if CONFIG_SUPERBLOCKS
void *orig = xd->mode_info_context;
@@ -900,7 +874,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#if CONFIG_HYBRIDTRANSFORM
{
if (vp8_read_bit(bc)) {
/* read coef probability tree */
@@ -920,7 +893,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#endif
if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
@@ -940,7 +912,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#if CONFIG_HYBRIDTRANSFORM8X8
if (pbi->common.txfm_mode != ONLY_4X4 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_8X8; i++)
@@ -959,7 +930,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#endif
// 16x16
if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
@@ -980,7 +950,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#if CONFIG_HYBRIDTRANSFORM16X16
if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
@@ -999,7 +968,6 @@ static void read_coef_probs(VP8D_COMP *pbi, BOOL_DECODER* const bc) {
}
}
}
#endif
}
int vp8_decode_frame(VP8D_COMP *pbi) {
@@ -1362,22 +1330,16 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_coef_probs,
pbi->common.fc.coef_probs);
#if CONFIG_HYBRIDTRANSFORM
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs,
pbi->common.fc.hybrid_coef_probs);
#endif
vp8_copy(pbi->common.fc.pre_coef_probs_8x8,
pbi->common.fc.coef_probs_8x8);
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8,
pbi->common.fc.hybrid_coef_probs_8x8);
#endif
vp8_copy(pbi->common.fc.pre_coef_probs_16x16,
pbi->common.fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
pbi->common.fc.hybrid_coef_probs_16x16);
#endif
vp8_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
vp8_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
vp8_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob);
@@ -1391,17 +1353,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
#endif
vp8_zero(pbi->common.fc.coef_counts);
#if CONFIG_HYBRIDTRANSFORM
vp8_zero(pbi->common.fc.hybrid_coef_counts);
#endif
vp8_zero(pbi->common.fc.coef_counts_8x8);
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(pbi->common.fc.hybrid_coef_counts_8x8);
#endif
vp8_zero(pbi->common.fc.coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(pbi->common.fc.hybrid_coef_counts_16x16);
#endif
vp8_zero(pbi->common.fc.ymode_counts);
vp8_zero(pbi->common.fc.uv_mode_counts);
vp8_zero(pbi->common.fc.bmode_counts);
@@ -42,7 +42,6 @@ void vp8_dequantize_b_c(BLOCKD *d) {
}
#if CONFIG_HYBRIDTRANSFORM
void vp8_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
@@ -77,9 +76,7 @@ void vp8_ht_dequant_idct_add_c(TX_TYPE tx_type, short *input, short *dq,
pred += pitch;
}
}
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
@@ -123,7 +120,6 @@ void vp8_ht_dequant_idct_add_8x8_c(TX_TYPE tx_type, short *input, short *dq,
pred = origpred + (b + 1) / 2 * 4 * pitch + ((b + 1) % 2) * 4;
}
}
#endif
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
@@ -468,7 +464,6 @@ void vp8_dequant_dc_idct_add_8x8_c(short *input, short *dq, unsigned char *pred,
#endif
}
#if CONFIG_HYBRIDTRANSFORM16X16
void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
unsigned char *pred, unsigned char *dest,
int pitch, int stride) {
@@ -507,7 +502,6 @@ void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
pred += pitch;
}
}
#endif
void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
@@ -76,7 +76,6 @@ extern prototype_dequant_block(vp8_dequant_block);
#endif
extern prototype_dequant_idct_add(vp8_dequant_idct_add);
#if CONFIG_HYBRIDTRANSFORM
// declare dequantization and inverse transform module of hybrid transform decoder
#ifndef vp8_ht_dequant_idct_add
#define vp8_ht_dequant_idct_add vp8_ht_dequant_idct_add_c
@@ -85,7 +84,6 @@ extern void vp8_ht_dequant_idct_add(TX_TYPE tx_type, short *input, short *dq,