Commit 9ac5508f authored by Thomas, committed by Yaowu Xu

Add EC_ADAPT experiment for symbol-adaptive entropy coding.

This experiment performs symbol-by-symbol statistics
adaptation for non-binary symbols. It requires DAALA_EC, or
ANS together with RANS, to be enabled. The adaptation is
currently based on a simple recursive filter taken from
Daala, with an adaptation rate that depends on the alphabet
size. It applies wherever non-binary symbols are encoded
using cumulative distribution functions (CDFs) rather than
trees.
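
For reference, a minimal standalone sketch of the adaptation
step is shown below; the names adapt_cdf and msb are
illustrative (the change itself adds an update_cdf() helper,
visible in the diff), and 15-bit CDFs are assumed, i.e.
cdf[nsymbs - 1] == 32768.

    #include <stdint.h>

    typedef uint16_t aom_cdf_prob;

    /* Index of the highest set bit, as aom's get_msb() returns. */
    static int msb(unsigned int n) {
      int lg = 0;
      while (n >>= 1) ++lg;
      return lg;
    }

    static void adapt_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
      /* Larger alphabets adapt more slowly:
       * rate = 3 + floor(log2(nsymbs)). */
      const int rate = 3 + msb(nsymbs);
      int i, tmp;
      /* Pull cdf[0..val-1] down ... */
      for (i = 0; i < val; ++i) {
        tmp = 2 - (1 << rate) + i;
        cdf[i] -= (cdf[i] - tmp) >> rate;
      }
      /* ... and cdf[val..nsymbs-1] up toward 32768, so the coded
       * symbol val gains probability. */
      for (i = val; i < nsymbs; ++i) {
        tmp = 32768 - ((nsymbs - 1) - i);
        cdf[i] -= (cdf[i] - tmp) >> rate;
      }
    }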

Where symbols are adapted, the corresponding forward
probability updates in the compressed header are removed.

In the case of RANS, coefficient token values are adapted,
with the exception of the zero token, which remains a binary
symbol. In the case of DAALA_EC, other values such as inter
and intra modes are also adapted, since CDFs are provided
for them.
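
Concretely, with EC_ADAPT the CDF arguments to
aom_read_symbol() and aom_write_symbol() are no longer const
and are adapted in place after each symbol. A hedged sketch
of the encoder-side pattern follows; write_one_intra_mode is
a hypothetical wrapper, and av1_intra_mode_ind /
fc->y_mode_cdf are used here as assumed names for the
mode-to-symbol mapping and the CDF table.

    /* With EC_ADAPT, aom_write_symbol() finishes by calling
     * update_cdf(cdf, symb, nsymbs), so the coding context adapts
     * as a side effect of coding the symbol. */
    static void write_one_intra_mode(aom_writer *w, FRAME_CONTEXT *fc,
                                     int size_group, PREDICTION_MODE mode) {
      aom_cdf_prob *cdf = fc->y_mode_cdf[size_group];  /* mutable now */
      aom_write_symbol(w, av1_intra_mode_ind[mode], cdf, INTRA_MODES);
    }

The decoder applies the same update in aom_read_symbol(), so
both sides stay in sync without forward updates in the
compressed header.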

The experiment is configured with:

./configure --enable-experimental --enable-daala-ec --enable-ec-adapt

or

./configure --enable-experimental --enable-ans --enable-rans \
    --enable-ec-adapt

EC_ADAPT is not currently compatible with tiles.

BDR results on Objective-1-fast give a small loss:

PSNR YCbCr:   0.51%   0.49%   0.48%
PSNRHVS:      0.50%
SSIM:         0.50%
MSSSIM:       0.51%
CIEDE2000:    0.50%

Change-Id: I3888718e42616f3fd87144de7f125228446ac984
parent d722f71e
......@@ -203,7 +203,7 @@ static INLINE int aom_read_tree_(aom_reader *r, const aom_tree_index *tree,
return ret;
}
static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
static INLINE int aom_read_symbol_(aom_reader *r, aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
#if CONFIG_RANS
......@@ -218,6 +218,10 @@ static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
assert(0 && "Unsupported bitreader operation");
ret = -1;
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, ret, nsymbs);
#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
#endif
......
......@@ -98,8 +98,8 @@ static INLINE void aom_write_tree(aom_writer *w, const aom_tree_index *tree,
#endif
}
static INLINE void aom_write_symbol(aom_writer *w, int symb,
const aom_cdf_prob *cdf, int nsymbs) {
static INLINE void aom_write_symbol(aom_writer *w, int symb, aom_cdf_prob *cdf,
int nsymbs) {
#if CONFIG_RANS
struct rans_sym s;
(void)nsymbs;
......@@ -116,6 +116,10 @@ static INLINE void aom_write_symbol(aom_writer *w, int symb,
(void)nsymbs;
assert(0 && "Unsupported bitwriter operation");
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, symb, nsymbs);
#endif
}
#ifdef __cplusplus
......
......@@ -15,6 +15,7 @@
#include "./aom_config.h"
#include "./aom_dsp_common.h"
#include "aom_ports/bitops.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
......@@ -134,6 +135,42 @@ void av1_indices_from_tree(int *ind, int *inv, int len,
DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
#if CONFIG_EC_ADAPT
static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
const int rate = 3 + get_msb(nsymbs);
// Daala method
int i, tmp;
for (i = 0; i < val; ++i) {
tmp = 2 - (1 << rate) + i;
cdf[i] -= (cdf[i] - tmp) >> rate;
}
for (i = val; i < nsymbs; ++i) {
tmp = -(1 << rate) + 32768 + (1 << rate) - ((nsymbs - 1) - i);
cdf[i] -= (cdf[i] - tmp) >> rate;
}
// Slightly better
// int prob[16];
// int i;
// int diff;
// prob[0] = cdf[0];
// for (i=1; i<nsymbs; ++i)
// prob[i] = cdf[i] - cdf[i-1];
//
// for (i=0; i<nsymbs; ++i) {
// prob[i] -= (prob[i] >> rate);
// prob[i] = AOMMAX(prob[i],1);
// cdf[i] = i==0 ? prob[i] : cdf[i-1]+prob[i];
// }
// diff = (1<<15) - cdf[nsymbs-1];
//
// for (i=val; i<nsymbs; ++i) {
// cdf[i] += diff;
// }
//
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -592,11 +592,10 @@ static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
}
#if CONFIG_DAALA_EC
static INLINE const aom_cdf_prob *get_y_mode_cdf(const AV1_COMMON *cm,
const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
static INLINE aom_cdf_prob *get_y_mode_cdf(AV1_COMMON *cm, const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
return cm->kf_y_cdf[above][left];
......
......@@ -114,6 +114,7 @@ static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb) {
return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
......@@ -165,6 +166,34 @@ static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
}
}
#endif // CONFIG_EXT_INTER
#if !CONFIG_EXT_TX
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
#endif
#endif
static REFERENCE_MODE read_frame_reference_mode(
const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
......@@ -214,8 +243,10 @@ static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
}
static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
int i, j;
int i;
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
int j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_DAALA_EC || CONFIG_RANS
av1_tree_to_cdf(av1_mv_joint_tree, ctx->joints, ctx->joint_cdf);
......@@ -231,7 +262,6 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_class_tree, comp_ctx->classes, comp_ctx->class_cdf);
#endif
}
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
for (j = 0; j < CLASS0_SIZE; ++j) {
......@@ -246,6 +276,7 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_fp_tree, comp_ctx->fp, comp_ctx->fp_cdf);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
if (allow_hp) {
for (i = 0; i < 2; ++i) {
......@@ -1884,13 +1915,18 @@ static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
aom_reader *r) {
int i, j, k, l, m;
#if CONFIG_EC_ADAPT
const int node_limit = ONE_TOKEN;
#else
const int node_limit = UNCONSTRAINED_NODES;
#endif
if (aom_read_bit(r, ACCT_STR))
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
for (m = 0; m < node_limit; ++m)
av1_diff_update_prob(r, &coef_probs[i][j][k][l][m], ACCT_STR);
}
......@@ -3586,6 +3622,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
#if CONFIG_EXT_TX
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
int s;
......@@ -3611,36 +3648,10 @@ static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
}
}
}
#endif // !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
#else
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_SUPERTX
static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
......@@ -3764,6 +3775,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
av1_diff_update_prob(&r, &fc->delta_q_prob[k], ACCT_STR);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->seg.enabled && cm->seg.update_map) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
......@@ -3802,7 +3814,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#endif
}
#endif // CONFIG_EXT_PARTITION_TYPES
#endif // EC_ADAPT, DAALA_EC
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
for (j = 0; j < INTRA_FILTERS - 1; ++j)
......@@ -3814,6 +3826,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#if CONFIG_DAALA_EC
av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
......@@ -3823,12 +3836,14 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
cm->kf_y_cdf[k][j]);
#endif
}
#endif // EC_ADAPT, DAALA_EC
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_inter_mode_probs(fc, &r);
#endif
#if CONFIG_EXT_INTER
read_inter_compound_mode_probs(fc, &r);
......@@ -3864,7 +3879,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
}
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
#endif
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
av1_diff_update_prob(&r, &fc->intra_inter_prob[i], ACCT_STR);
......@@ -3874,6 +3891,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
read_frame_reference_mode_probs(cm, &r);
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (j = 0; j < BLOCK_SIZE_GROUPS; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &fc->y_mode_prob[j][i], ACCT_STR);
......@@ -3882,6 +3900,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
fc->y_mode_cdf[j]);
#endif
}
#endif
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
......@@ -3889,7 +3908,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#else
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_ext_tx_probs(fc, &r);
#endif // EC_ADAPT, DAALA_EC
#if CONFIG_SUPERTX
if (!xd->lossless[0]) read_supertx_probs(fc, &r);
#endif
......
......@@ -42,7 +42,7 @@ static INLINE int read_uniform(aom_reader *r, int n) {
#endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
#if CONFIG_DAALA_EC
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_cdf_prob *cdf) {
static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) {
return (PREDICTION_MODE)
av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)];
}
......@@ -264,8 +264,7 @@ static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
}
#endif // CONFIG_EXT_INTER
static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) {
#if CONFIG_DAALA_EC
return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR);
#else
......@@ -796,8 +795,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
}
}
static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
int mag, d, fr, hp;
const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
const int mv_class =
......@@ -840,7 +838,7 @@ static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
}
static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx, nmv_context_counts *counts,
nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
MV_JOINT_TYPE joint_type;
MV diff = { 0, 0 };
......
......@@ -48,15 +48,13 @@ static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
}
#if CONFIG_AOM_QM
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq, int ctx, const int16_t *scan,
const int16_t *nb, aom_reader *r,
const qm_val_t *iqm[2][TX_SIZES])
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
aom_reader *r, const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq,
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dq,
#if CONFIG_NEW_QUANT
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
......@@ -66,20 +64,20 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
{
FRAME_COUNTS *counts = xd->counts;
const int max_eob = get_tx2d_size(tx_size);
const FRAME_CONTEXT *const fc = xd->fc;
FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(&xd->mi[0]->mbmi);
#if CONFIG_AOM_QM
const qm_val_t *iqmatrix = iqm[!ref][tx_size];
#endif
int band, c = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const aom_prob *prob;
#if CONFIG_RANS || CONFIG_DAALA_EC
const aom_cdf_prob(*const coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
aom_cdf_prob(*coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
fc->coef_cdfs[tx_size_ctx][type][ref];
const aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
#endif // CONFIG_RANS
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
......
......@@ -30,8 +30,8 @@ void av1_entropy_mv_init(void) {
av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
}
static void encode_mv_component(aom_writer *w, int comp,
const nmv_component *mvcomp, int usehp) {
static void encode_mv_component(aom_writer *w, int comp, nmv_component *mvcomp,
int usehp) {
int offset;
const int sign = comp < 0;
const int mag = sign ? -comp : comp;
......@@ -150,6 +150,7 @@ static void update_mv(aom_writer *w, const unsigned int ct[2], aom_prob *cur_p,
#endif
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void write_mv_update(const aom_tree_index *tree,
aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
......@@ -164,6 +165,7 @@ static void write_mv_update(const aom_tree_index *tree,
for (i = 0; i < n - 1; ++i)
update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
}
#endif
void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
nmv_context_counts *const nmv_counts) {
......@@ -173,8 +175,13 @@ void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
w);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_mv_joint_tree, cm->fc->nmvc.joints,
cm->fc->nmvc.joint_cdf);
#endif
for (i = 0; i < 2; ++i) {
nmv_component *comp = &mvc->comps[i];
......@@ -210,7 +217,7 @@ void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
nmv_context *const mvc = &cm->fc->nmvc;
nmv_context_counts *const counts = nmv_counts;
#if !(CONFIG_DAALA_EC || CONFIG_RANS)
#if !CONFIG_EC_ADAPT || !(CONFIG_DAALA_EC || CONFIG_RANS)
write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
#if CONFIG_DAALA_EC || CONFIG_RANS
av1_tree_to_cdf(av1_mv_joint_tree, cm->fc->nmvc.joints,
......@@ -264,7 +271,7 @@ void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context *mvctx, int usehp) {
nmv_context *mvctx, int usehp) {
const MV diff = { mv->row - ref->row, mv->col - ref->col };
const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
#if CONFIG_REF_MV
......
......@@ -27,7 +27,7 @@ void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
#if CONFIG_REF_MV
int is_compound,
#endif
const nmv_context *mvctx, int usehp);
nmv_context *mvctx, int usehp);
void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
const nmv_context *mvctx, int usehp);
......
......@@ -358,7 +358,7 @@ static void set_entropy_context_b(int plane, int block, int blk_row,
static INLINE void add_token(TOKENEXTRA **t, const aom_prob *context_tree,
#if CONFIG_RANS || CONFIG_DAALA_EC
const aom_cdf_prob (*token_cdf)[ENTROPY_TOKENS],
aom_cdf_prob (*token_cdf)[ENTROPY_TOKENS],
#endif // CONFIG_RANS
int32_t extra, uint8_t token,
uint8_t skip_eob_node, unsigned int *counts) {
......@@ -484,7 +484,7 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col,
add_token(&t, coef_probs[band[c]][pt],
#if CONFIG_RANS || CONFIG_DAALA_EC
(const aom_cdf_prob(*)[ENTROPY_TOKENS]) & coef_cdfs[band[c]][pt],
&coef_cdfs[band[c]][pt],
#endif
extra, (uint8_t)token, (uint8_t)skip_eob, counts[band[c]][pt]);
......
......@@ -37,7 +37,7 @@ typedef struct {
typedef struct {
const aom_prob *context_tree;
#if CONFIG_RANS || CONFIG_DAALA_EC
const aom_cdf_prob (*token_cdf)[ENTROPY_TOKENS];
aom_cdf_prob (*token_cdf)[ENTROPY_TOKENS];
#endif
EXTRABIT extra;
uint8_t token;
......
......@@ -291,6 +291,7 @@ EXPERIMENT_LIST="
filter_7bit
parallel_deblocking
tile_groups
ec_adapt
"
CONFIG_LIST="
dependency_tracking
......