Commit 9ac5508f authored by Thomas, committed by Yaowu Xu

Add EC_ADAPT experiment for symbol-adaptive entropy coding.

This experiment performs symbol-by-symbol statistics
adaptation for non-binary symbols. It requires either
DAALA_EC, or both ANS and RANS, to be enabled. The
adaptation is currently based on a simple recursive
filter taken from Daala, with an adaptation rate that
depends on the alphabet size. It applies wherever
non-binary symbols are encoded using cumulative
distribution functions (CDFs) rather than trees.
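
As a rough sketch of the idea (a simplified stand-in, not
the exact targets used by update_cdf() in the hunks below),
each CDF entry is moved a fraction 2^-rate of the way
toward the outcome just coded, with the rate growing with
the log of the alphabet size:

  /* Simplified stand-in for the recursive-filter adaptation, not the
   * exact Daala update in update_cdf() below (the real targets carry
   * small per-index offsets, presumably to keep every symbol at a
   * nonzero probability).  cdf[] is cumulative, scaled so that
   * cdf[nsymbs - 1] == 32768. */
  #include <stdint.h>
  #include <stdio.h>

  static int get_msb_ish(int n) {  /* floor(log2(n)); stand-in for get_msb() */
    int b = 0;
    while (n >>= 1) ++b;
    return b;
  }

  static void toy_update_cdf(uint16_t *cdf, int val, int nsymbs) {
    const int rate = 3 + get_msb_ish(nsymbs); /* bigger alphabet, slower adaptation */
    for (int i = 0; i < val; ++i)
      cdf[i] -= cdf[i] >> rate;               /* move toward 0 */
    for (int i = val; i < nsymbs; ++i)
      cdf[i] += (32768 - cdf[i]) >> rate;     /* move toward 32768 */
  }

  int main(void) {
    uint16_t cdf[4] = { 8192, 16384, 24576, 32768 };       /* start uniform */
    for (int k = 0; k < 8; ++k) toy_update_cdf(cdf, 2, 4);  /* code symbol 2 */
    printf("%d %d %d %d\n", cdf[0], cdf[1], cdf[2], cdf[3]);
    return 0;
  }

Repeating the update for the same symbol concentrates
probability mass on it, which is the intended adaptation.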

Where symbols are adapted, forward updates in the compressed
header are removed.
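
Concretely, the removal is done with the compile-time
guard that recurs throughout the hunks below. A minimal
illustration of the pattern follows; read_forward_update()
is a hypothetical stand-in for calls such as
read_switchable_interp_probs() or update_seg_probs():

  /* Illustration only: when EC_ADAPT is enabled together with DAALA_EC
   * (or RANS), forward-update parsing is compiled out of the header. */
  #include <stdio.h>

  #define CONFIG_EC_ADAPT 1
  #define CONFIG_DAALA_EC 1

  void read_forward_update(void) { puts("forward update read from header"); }

  int main(void) {
  #if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
    read_forward_update();  /* only when per-symbol adaptation is unavailable */
  #else
    puts("EC_ADAPT: no forward updates; CDFs adapt as symbols are coded");
  #endif
    return 0;
  }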

In the case of RANS, coefficient token values are
adapted, with the exception of the zero token, which
remains a binary symbol. In the case of DAALA_EC, other
values such as inter and intra modes are also adapted,
since CDFs are provided in those cases.
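
A hedged sketch of what that split looks like on the RANS
coefficient path; every name and value below is made up
for illustration, and only the zero-token/other-token
split is taken from this change:

  /* Hypothetical sketch: the zero token stays a binary symbol, the
   * remaining token values share one adaptive CDF.  read_bit() and
   * read_symbol_adapt() are made-up stand-ins, not libaom API. */
  #include <stdint.h>
  #include <stdio.h>

  static int read_bit(uint8_t prob) { (void)prob; return 0; }  /* canned: "not zero" */
  static int read_symbol_adapt(uint16_t *cdf, int nsymbs) {
    (void)cdf; (void)nsymbs;  /* a real reader would also update cdf here */
    return 1;                 /* canned token index */
  }

  static int read_coeff_token(uint8_t zero_prob, uint16_t *token_cdf, int ntok) {
    if (read_bit(zero_prob)) return 0;             /* zero token: binary, not adapted */
    return 1 + read_symbol_adapt(token_cdf, ntok); /* others: adaptive multi-symbol */
  }

  int main(void) {
    uint16_t cdf[10] = { 0 };
    printf("token = %d\n", read_coeff_token(128, cdf, 10));
    return 0;
  }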

The experiment is configured with:

./configure --enable-experimental --enable-daala-ec --enable-ec-adapt

or

./configure --enable-experimental --enable-ans --enable-rans \
    --enable-ec-adapt

EC_ADAPT is not currently compatible with tiles.

BDR results on Objective-1-fast give a small loss:

PSNR YCbCr:      0.51%      0.49%      0.48%
PSNRHVS:      0.50%
SSIM:      0.50%
MSSSIM:      0.51%
CIEDE2000:      0.50%

Change-Id: I3888718e42616f3fd87144de7f125228446ac984
parent d722f71e
......@@ -203,7 +203,7 @@ static INLINE int aom_read_tree_(aom_reader *r, const aom_tree_index *tree,
return ret;
}
static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
static INLINE int aom_read_symbol_(aom_reader *r, aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
#if CONFIG_RANS
......@@ -218,6 +218,10 @@ static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
assert(0 && "Unsupported bitreader operation");
ret = -1;
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, ret, nsymbs);
#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
#endif
......
......@@ -98,8 +98,8 @@ static INLINE void aom_write_tree(aom_writer *w, const aom_tree_index *tree,
#endif
}
static INLINE void aom_write_symbol(aom_writer *w, int symb,
const aom_cdf_prob *cdf, int nsymbs) {
static INLINE void aom_write_symbol(aom_writer *w, int symb, aom_cdf_prob *cdf,
int nsymbs) {
#if CONFIG_RANS
struct rans_sym s;
(void)nsymbs;
......@@ -116,6 +116,10 @@ static INLINE void aom_write_symbol(aom_writer *w, int symb,
(void)nsymbs;
assert(0 && "Unsupported bitwriter operation");
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, symb, nsymbs);
#endif
}
#ifdef __cplusplus
......
......@@ -15,6 +15,7 @@
#include "./aom_config.h"
#include "./aom_dsp_common.h"
#include "aom_ports/bitops.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
......@@ -134,6 +135,42 @@ void av1_indices_from_tree(int *ind, int *inv, int len,
DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
#if CONFIG_EC_ADAPT
static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
const int rate = 3 + get_msb(nsymbs);
// Daala method
int i, tmp;
for (i = 0; i < val; ++i) {
tmp = 2 - (1 << rate) + i;
cdf[i] -= (cdf[i] - tmp) >> rate;
}
for (i = val; i < nsymbs; ++i) {
tmp = -(1 << rate) + 32768 + (1 << rate) - ((nsymbs - 1) - i);
cdf[i] -= (cdf[i] - tmp) >> rate;
}
// Slightly better
// int prob[16];
// int i;
// int diff;
// prob[0] = cdf[0];
// for (i=1; i<nsymbs; ++i)
// prob[i] = cdf[i] - cdf[i-1];
//
// for (i=0; i<nsymbs; ++i) {
// prob[i] -= (prob[i] >> rate);
// prob[i] = AOMMAX(prob[i],1);
// cdf[i] = i==0 ? prob[i] : cdf[i-1]+prob[i];
// }
// diff = (1<<15) - cdf[nsymbs-1];
//
// for (i=val; i<nsymbs; ++i) {
// cdf[i] += diff;
// }
//
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -592,11 +592,10 @@ static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
}
#if CONFIG_DAALA_EC
static INLINE const aom_cdf_prob *get_y_mode_cdf(const AV1_COMMON *cm,
const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
static INLINE aom_cdf_prob *get_y_mode_cdf(AV1_COMMON *cm, const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
return cm->kf_y_cdf[above][left];
......
......@@ -114,6 +114,7 @@ static TX_MODE read_tx_mode(struct aom_read_bit_buffer *rb) {
return aom_rb_read_bit(rb) ? TX_MODE_SELECT : aom_rb_read_literal(rb, 2);
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
......@@ -165,6 +166,34 @@ static void read_inter_compound_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
}
}
#endif // CONFIG_EXT_INTER
#if !CONFIG_EXT_TX
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
#endif
#endif
static REFERENCE_MODE read_frame_reference_mode(
const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
......@@ -214,8 +243,10 @@ static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
}
static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
int i, j;
int i;
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
int j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_DAALA_EC || CONFIG_RANS
av1_tree_to_cdf(av1_mv_joint_tree, ctx->joints, ctx->joint_cdf);
......@@ -231,7 +262,6 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_class_tree, comp_ctx->classes, comp_ctx->class_cdf);
#endif
}
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
for (j = 0; j < CLASS0_SIZE; ++j) {
......@@ -246,6 +276,7 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_fp_tree, comp_ctx->fp, comp_ctx->fp_cdf);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
if (allow_hp) {
for (i = 0; i < 2; ++i) {
......@@ -1884,13 +1915,18 @@ static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
aom_reader *r) {
int i, j, k, l, m;
#if CONFIG_EC_ADAPT
const int node_limit = ONE_TOKEN;
#else
const int node_limit = UNCONSTRAINED_NODES;
#endif
if (aom_read_bit(r, ACCT_STR))
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
for (m = 0; m < node_limit; ++m)
av1_diff_update_prob(r, &coef_probs[i][j][k][l][m], ACCT_STR);
}
......@@ -3586,6 +3622,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
#if CONFIG_EXT_TX
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
int s;
......@@ -3611,36 +3648,10 @@ static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
}
}
}
#endif // !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
#else
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_SUPERTX
static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
......@@ -3764,6 +3775,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
av1_diff_update_prob(&r, &fc->delta_q_prob[k], ACCT_STR);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->seg.enabled && cm->seg.update_map) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
......@@ -3802,7 +3814,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#endif
}
#endif // CONFIG_EXT_PARTITION_TYPES
#endif // EC_ADAPT, DAALA_EC
#if CONFIG_EXT_INTRA
for (i = 0; i < INTRA_FILTERS + 1; ++i)
for (j = 0; j < INTRA_FILTERS - 1; ++j)
......@@ -3814,6 +3826,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#if CONFIG_DAALA_EC
av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
......@@ -3823,12 +3836,14 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
cm->kf_y_cdf[k][j]);
#endif
}
#endif // EC_ADAPT, DAALA_EC
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_inter_mode_probs(fc, &r);
#endif
#if CONFIG_EXT_INTER
read_inter_compound_mode_probs(fc, &r);
......@@ -3864,7 +3879,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
}
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
#endif
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
av1_diff_update_prob(&r, &fc->intra_inter_prob[i], ACCT_STR);
......@@ -3874,6 +3891,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
read_frame_reference_mode_probs(cm, &r);
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (j = 0; j < BLOCK_SIZE_GROUPS; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &fc->y_mode_prob[j][i], ACCT_STR);
......@@ -3882,6 +3900,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
fc->y_mode_cdf[j]);
#endif
}
#endif
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
......@@ -3889,7 +3908,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#else
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_ext_tx_probs(fc, &r);
#endif // EC_ADAPT, DAALA_EC
#if CONFIG_SUPERTX
if (!xd->lossless[0]) read_supertx_probs(fc, &r);
#endif
......
......@@ -42,7 +42,7 @@ static INLINE int read_uniform(aom_reader *r, int n) {
#endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
#if CONFIG_DAALA_EC
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_cdf_prob *cdf) {
static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) {
return (PREDICTION_MODE)
av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)];
}
......@@ -264,8 +264,7 @@ static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
}
#endif // CONFIG_EXT_INTER
static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) {
#if CONFIG_DAALA_EC
return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR);
#else
......@@ -796,8 +795,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
}
}
static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
int mag, d, fr, hp;
const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
const int mv_class =
......@@ -840,7 +838,7 @@ static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
}
static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx, nmv_context_counts *counts,
nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
MV_JOINT_TYPE joint_type;
MV diff = { 0, 0 };
......
......@@ -48,15 +48,13 @@ static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
}
#if CONFIG_AOM_QM
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq, int ctx, const int16_t *scan,
const int16_t *nb, aom_reader *r,
const qm_val_t *iqm[2][TX_SIZES])
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
aom_reader *r, const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, TX_TYPE tx_type,
const int16_t *dq,
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dq,
#if CONFIG_NEW_QUANT
dequant_val_type_nuq *dq_val,
#endif // CONFIG_NEW_QUANT
......@@ -66,20 +64,20 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
{
FRAME_COUNTS *counts = xd->counts;
const int max_eob = get_tx2d_size(tx_size);
const FRAME_CONTEXT *const fc = xd->fc;
FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(&xd->mi[0]->mbmi);
#if CONFIG_AOM_QM
const qm_val_t *iqmatrix = iqm[!ref][tx_size];
#endif
int band, c = 0;
const int tx_size_ctx = txsize_sqr_map[tx_size];
const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size_ctx][type][ref];
const aom_prob *prob;
#if CONFIG_RANS || CONFIG_DAALA_EC
const aom_cdf_prob(*const coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
aom_cdf_prob(*coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
fc->coef_cdfs[tx_size_ctx][type][ref];
const aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
#endif // CONFIG_RANS
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
......
......@@ -329,6 +329,7 @@ static void prob_diff_update(const aom_tree_index *tree,
av1_cond_prob_diff_update(w, &probs[i], branch_ct[i], probwt);
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static int prob_diff_update_savings(const aom_tree_index *tree,
aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
......@@ -346,6 +347,7 @@ static int prob_diff_update_savings(const aom_tree_index *tree,
}
return savings;
}
#endif
#if CONFIG_VAR_TX
static void write_tx_size_vartx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
......@@ -550,6 +552,7 @@ static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
}
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int j;
......@@ -681,7 +684,7 @@ static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
}
}
#endif // CONFIG_EXT_TX
#endif
#if CONFIG_PALETTE
static void pack_palette_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
int num) {
......@@ -877,8 +880,7 @@ static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
#endif
static void write_segment_id(aom_writer *w, const struct segmentation *seg,
const struct segmentation_probs *segp,
int segment_id) {
struct segmentation_probs *segp, int segment_id) {
if (seg->enabled && seg->update_map) {
#if CONFIG_DAALA_EC
aom_write_symbol(w, segment_id, segp->tree_cdf, MAX_SEGMENTS);
......@@ -1139,7 +1141,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
aom_writer *w) {
AV1_COMMON *const cm = &cpi->common;
#if !CONFIG_REF_MV
const nmv_context *nmvc = &cm->fc->nmvc;
nmv_context *nmvc = &cm->fc->nmvc;
#endif
#if CONFIG_DELTA_Q
......@@ -1150,7 +1152,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
const MACROBLOCKD *xd = &x->e_mbd;
#endif
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
struct segmentation_probs *const segp = &cm->fc->seg;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const PREDICTION_MODE mode = mbmi->mode;
......@@ -1363,7 +1365,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], ref,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
#if CONFIG_EXT_INTER
......@@ -1388,7 +1390,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], 1,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
&mi->bmi[j].ref_mv[1].as_mv,
......@@ -1402,7 +1404,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], 0,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
&mi->bmi[j].ref_mv[0].as_mv,
......@@ -1427,7 +1429,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], ref,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
#if CONFIG_EXT_INTER
......@@ -1453,7 +1455,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx =
av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv,
......@@ -1467,7 +1469,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx =
av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv,
......@@ -1613,15 +1615,15 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
}
#if CONFIG_DELTA_Q
static void write_mb_modes_kf(const AV1_COMMON *cm, MACROBLOCKD *xd,
static void write_mb_modes_kf(AV1_COMMON *cm, MACROBLOCKD *xd,
MODE_INFO **mi_8x8, aom_writer *w) {
int skip;
#else
static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
static void write_mb_modes_kf(AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO **mi_8x8, aom_writer *w) {
#endif
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
struct segmentation_probs *const segp = &cm->fc->seg;
const MODE_INFO *const mi = mi_8x8[0];
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
......@@ -2203,7 +2205,11 @@ static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
av1_coeff_probs_model *new_coef_probs) {
av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
const aom_prob upd = DIFF_UPDATE_PROB;
#if CONFIG_EC_ADAPT
const int entropy_nodes_update = ONE_TOKEN;
#else
const int entropy_nodes_update = UNCONSTRAINED_NODES;
#endif
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
#if CONFIG_TILE_GROUPS
......@@ -2889,6 +2895,7 @@ static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
}
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
AV1_COMMON *cm = &cpi->common;
#if CONFIG_TILE_GROUPS
......@@ -2917,6 +2924,7 @@ static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
cm->fc->seg.tree_cdf);
#endif
}
#endif // CONFIG_EC_ADAPT,CONFIG_DAALA_EC
static void write_txfm_mode(TX_MODE mode, struct aom_write_bit_buffer *wb) {
aom_wb_write_bit(wb, mode == TX_MODE_SELECT);
......@@ -3691,6 +3699,7 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
FRAME_COUNTS *counts = cpi->td.counts;
aom_writer *header_bc;
int i, j;
#if CONFIG_TILE_GROUPS
const int probwt = cm->num_tg;
#else
......@@ -3713,6 +3722,7 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#endif // CONFIG_LOOP_RESTORATION
update_txfm_probs(cm, header_bc, counts);
update_coef_probs(cpi, header_bc);
#if CONFIG_VAR_TX
......@@ -3730,6 +3740,7 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#if CONFIG_DELTA_Q
update_delta_q_probs(cm, header_bc, counts);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
update_seg_probs(cpi, header_bc);
for (i = 0; i < INTRA_MODES; ++i) {
......@@ -3764,12 +3775,14 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
prob_diff_update(av1_intra_filter_tree, fc->intra_filter_probs[i],
counts->intra_filter[i], INTRA_FILTERS, probwt, header_bc);
#endif // CONFIG_EXT_INTRA
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
if (frame_is_intra_only(cm)) {
av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
#if CONFIG_DAALA_EC
av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j) {
prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
......@@ -3780,7 +3793,9 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
cm->kf_y_cdf[i][j]);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
} else {
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
#if CONFIG_REF_MV
update_inter_mode_probs(cm, header_bc, counts);
#else
......@@ -3793,7 +3808,7 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#endif
}
#endif
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
#if CONFIG_EXT_INTER
update_inter_compound_mode_probs(cm, probwt, header_bc);
......@@ -3828,9 +3843,10 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
prob_diff_update(av1_motion_mode_tree,