Commit b0112dfd authored by Thomas, committed by Thomas Davies

Add EC_ADAPT experiment for symbol-adaptive entropy coding.

This experiment performs symbol-by-symbol statistics
adaptation for non-binary symbols. It requires either
DAALA_EC, or both ANS and RANS, to be enabled. The adaptation
is currently based on a simple recursive filter taken from
Daala, with an adaptation rate that depends on the alphabet
size. It applies wherever non-binary symbols are encoded
using cumulative distribution functions (CDFs) rather than
trees.
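
For illustration, a simplified sketch of the recursive filter
(see update_cdf() in the patch below; the floor and ceiling
targets here are algebraically reduced from the committed code):

  /* cdf[] holds cumulative probabilities that sum to 32768. */
  const int rate = 3 + get_msb(nsymbs); /* larger alphabets adapt slower */
  for (i = 0; i < val; ++i)
    cdf[i] -= (cdf[i] - (2 - (1 << rate) + i)) >> rate;
  for (i = val; i < nsymbs; ++i)
    cdf[i] -= (cdf[i] - (32768 - (nsymbs - 1 - i))) >> rate;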

Where symbols are adapted, forward updates in the compressed
header are removed.
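
In the patch this amounts to compiling out the corresponding
forward-update calls, e.g. in read_compressed_header():

  #if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
    read_ext_tx_probs(fc, &r);
  #endif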

In the case of RANS, coefficient token values are adapted,
with the exception of the zero token, which remains a
binary symbol. In the case of DAALA_EC, other values
such as inter and intra modes are also adapted, since CDFs
are provided in those cases.
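
Accordingly, forward updates for coefficient tokens are capped
at ONE_TOKEN instead of UNCONSTRAINED_NODES on both sides
(node_limit in read_coef_probs_common(), entropy_nodes_update
in update_coef_probs_common()):

  #if CONFIG_EC_ADAPT
    const int node_limit = ONE_TOKEN;
  #else
    const int node_limit = UNCONSTRAINED_NODES;
  #endif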

The experiment is configured with:

./configure --enable-experimental --enable-daala-ec --enable-ec-adapt

or

./configure --enable-experimental --enable-ans --enable-rans \
    --enable-ec-adapt

EC_ADAPT is not currently compatible with tiles.

BDR results on Objective-1-fast give a small loss:

PSNR YCbCr:  0.51%  0.49%  0.48%
PSNRHVS:     0.50%
SSIM:        0.50%
MSSSIM:      0.51%
CIEDE2000:   0.50%

Change-Id: I3888718e42616f3fd87144de7f125228446ac984
parent 85437b21
@@ -232,7 +232,7 @@ static INLINE int aom_read_tree_(aom_reader *r, const aom_tree_index *tree,
return ret;
}
static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
static INLINE int aom_read_symbol_(aom_reader *r, aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
#if CONFIG_RANS
@@ -247,6 +247,10 @@ static INLINE int aom_read_symbol_(aom_reader *r, const aom_cdf_prob *cdf,
assert(0 && "Unsupported bitreader operation");
ret = -1;
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, ret, nsymbs);
#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
#endif
......
@@ -115,8 +115,8 @@ static INLINE void aom_write_tree(aom_writer *w, const aom_tree_index *tree,
#endif
}
static INLINE void aom_write_symbol(aom_writer *w, int symb,
const aom_cdf_prob *cdf, int nsymbs) {
static INLINE void aom_write_symbol(aom_writer *w, int symb, aom_cdf_prob *cdf,
int nsymbs) {
#if CONFIG_RANS
struct rans_sym s;
(void)nsymbs;
@@ -133,6 +133,10 @@ static INLINE void aom_write_symbol(aom_writer *w, int symb,
(void)nsymbs;
assert(0 && "Unsupported bitwriter operation");
#endif
#if ((CONFIG_RANS || CONFIG_DAALA_EC) && CONFIG_EC_ADAPT)
update_cdf(cdf, symb, nsymbs);
#endif
}
#ifdef __cplusplus
......
@@ -15,6 +15,7 @@
#include "./aom_config.h"
#include "./aom_dsp_common.h"
#include "aom_ports/bitops.h"
#include "aom_ports/mem.h"
#ifdef __cplusplus
@@ -134,6 +135,42 @@ void av1_indices_from_tree(int *ind, int *inv, int len,
DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]);
#if CONFIG_EC_ADAPT
static INLINE void update_cdf(aom_cdf_prob *cdf, int val, int nsymbs) {
const int rate = 3 + get_msb(nsymbs);  // larger alphabets adapt more slowly
// Daala method
int i, tmp;
// Entries below val decay towards a floor of roughly (i + 1), so every
// symbol keeps a nonzero probability.
for (i = 0; i < val; ++i) {
tmp = 2 - (1 << rate) + i;
cdf[i] -= (cdf[i] - tmp) >> rate;
}
// Entries at or above val move towards 32768 - (nsymbs - 1 - i), keeping
// the CDF strictly increasing with cdf[nsymbs - 1] tending to 32768.
for (i = val; i < nsymbs; ++i) {
tmp = -(1 << rate) + 32768 + (1 << rate) - ((nsymbs - 1) - i);
cdf[i] -= (cdf[i] - tmp) >> rate;
}
// Slightly better
// int prob[16];
// int i;
// int diff;
// prob[0] = cdf[0];
// for (i=1; i<nsymbs; ++i)
// prob[i] = cdf[i] - cdf[i-1];
//
// for (i=0; i<nsymbs; ++i) {
// prob[i] -= (prob[i] >> rate);
// prob[i] = AOMMAX(prob[i],1);
// cdf[i] = i==0 ? prob[i] : cdf[i-1]+prob[i];
// }
// diff = (1<<15) - cdf[nsymbs-1];
//
// for (i=val; i<nsymbs; ++i) {
// cdf[i] += diff;
// }
//
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
......
@@ -545,11 +545,10 @@ static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
}
#if CONFIG_DAALA_EC
static INLINE const aom_cdf_prob *get_y_mode_cdf(const AV1_COMMON *cm,
const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
static INLINE aom_cdf_prob *get_y_mode_cdf(AV1_COMMON *cm, const MODE_INFO *mi,
const MODE_INFO *above_mi,
const MODE_INFO *left_mi,
int block) {
const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
return cm->kf_y_cdf[above][left];
......
@@ -132,6 +132,7 @@ static void read_tx_mode_probs(struct tx_probs *tx_probs, aom_reader *r) {
av1_diff_update_prob(r, &tx_probs->p32x32[i][j], ACCT_STR);
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j;
for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
@@ -168,6 +169,33 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
#endif
}
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
#endif
static REFERENCE_MODE read_frame_reference_mode(
const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
if (is_compound_reference_allowed(cm)) {
@@ -211,8 +239,10 @@ static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
}
static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
int i, j;
int i;
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
int j;
update_mv_probs(ctx->joints, MV_JOINTS - 1, r);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_mv_joint_tree, ctx->joints, ctx->joint_cdf);
@@ -228,7 +258,6 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_class_tree, comp_ctx->classes, comp_ctx->class_cdf);
#endif
}
for (i = 0; i < 2; ++i) {
nmv_component *const comp_ctx = &ctx->comps[i];
for (j = 0; j < CLASS0_SIZE; ++j) {
@@ -243,6 +272,7 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
av1_tree_to_cdf(av1_mv_fp_tree, comp_ctx->fp, comp_ctx->fp_cdf);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
if (allow_hp) {
for (i = 0; i < 2; ++i) {
@@ -724,13 +754,18 @@ static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
aom_reader *r) {
int i, j, k, l, m;
#if CONFIG_EC_ADAPT
const int node_limit = ONE_TOKEN;
#else
const int node_limit = UNCONSTRAINED_NODES;
#endif
if (aom_read_bit(r, ACCT_STR))
for (i = 0; i < PLANE_TYPES; ++i)
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
for (m = 0; m < UNCONSTRAINED_NODES; ++m)
for (m = 0; m < node_limit; ++m)
av1_diff_update_prob(r, &coef_probs[i][j][k][l][m], ACCT_STR);
}
@@ -2014,32 +2049,6 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
return sz;
}
static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i, j, k;
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (j = 0; j < TX_TYPES; ++j) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->intra_ext_tx_prob[i][j],
fc->intra_ext_tx_cdf[i][j]);
#endif
}
}
}
if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) {
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
for (k = 0; k < TX_TYPES - 1; ++k)
av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k], ACCT_STR);
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_ext_tx_tree, fc->inter_ext_tx_prob[i],
fc->inter_ext_tx_cdf[i]);
#endif
}
}
}
static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
size_t partition_size) {
AV1_COMMON *const cm = &pbi->common;
@@ -2053,6 +2062,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
"Failed to allocate bool decoder 0");
if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r);
read_coef_probs(fc, cm->tx_mode, &r);
for (k = 0; k < SKIP_CONTEXTS; ++k)
@@ -2063,6 +2073,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
av1_diff_update_prob(&r, &fc->delta_q_prob[k], ACCT_STR);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->seg.enabled && cm->seg.update_map) {
if (cm->seg.temporal_update) {
for (k = 0; k < PREDICTION_PROBS; k++)
@@ -2093,12 +2104,14 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
fc->partition_cdf[j]);
#endif
}
#endif // EC_ADAPT, DAALA_EC
if (frame_is_intra_only(cm)) {
av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
#if CONFIG_DAALA_EC
av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (k = 0; k < INTRA_MODES; k++)
for (j = 0; j < INTRA_MODES; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
@@ -2108,11 +2121,14 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
cm->kf_y_cdf[k][j]);
#endif
}
#endif // EC_ADAPT, DAALA_EC
} else {
#if !CONFIG_REF_MV
nmv_context *const nmvc = &fc->nmvc;
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_inter_mode_probs(fc, &r);
#endif
#if CONFIG_MOTION_VAR
for (j = 0; j < BLOCK_SIZES; ++j)
@@ -2122,7 +2138,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
}
#endif // CONFIG_MOTION_VAR
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
#endif
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
av1_diff_update_prob(&r, &fc->intra_inter_prob[i], ACCT_STR);
@@ -2131,6 +2149,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
setup_compound_reference_mode(cm);
read_frame_reference_mode_probs(cm, &r);
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (j = 0; j < BLOCK_SIZE_GROUPS; j++) {
for (i = 0; i < INTRA_MODES - 1; ++i)
av1_diff_update_prob(&r, &fc->y_mode_prob[j][i], ACCT_STR);
@@ -2139,6 +2158,7 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
fc->y_mode_cdf[j]);
#endif
}
#endif
#if CONFIG_REF_MV
for (i = 0; i < NMV_CONTEXTS; ++i)
@@ -2146,7 +2166,9 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#else
read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
read_ext_tx_probs(fc, &r);
#endif // EC_ADAPT, DAALA_EC
}
return aom_reader_has_error(&r);
......
@@ -28,7 +28,7 @@
#define ACCT_STR __func__
#if CONFIG_DAALA_EC
static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_cdf_prob *cdf) {
static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) {
return (PREDICTION_MODE)
av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)];
}
@@ -220,8 +220,7 @@ static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
}
#endif // CONFIG_MOTION_VAR
static int read_segment_id(aom_reader *r,
const struct segmentation_probs *segp) {
static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) {
#if CONFIG_DAALA_EC
return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR);
#else
@@ -569,8 +568,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
}
}
static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
int usehp) {
static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
int mag, d, fr, hp;
const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
const int mv_class =
@@ -613,7 +611,7 @@ static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
}
static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
const nmv_context *ctx, nmv_context_counts *counts,
nmv_context *ctx, nmv_context_counts *counts,
int allow_hp) {
const MV_JOINT_TYPE joint_type =
#if CONFIG_DAALA_EC
......
@@ -47,32 +47,31 @@ static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
}
#if CONFIG_AOM_QM
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
aom_reader *r, const qm_val_t *iqm[2][TX_SIZES])
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, const int16_t *dq, int ctx,
const int16_t *scan, const int16_t *nb, aom_reader *r,
const qm_val_t *iqm[2][TX_SIZES])
#else
static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
aom_reader *r)
static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, const int16_t *dq, int ctx,
const int16_t *scan, const int16_t *nb, aom_reader *r)
#endif
{
FRAME_COUNTS *counts = xd->counts;
const int max_eob = 1 << (tx_size_1d_log2[tx_size] * 2);
const FRAME_CONTEXT *const fc = xd->fc;
FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(&xd->mi[0]->mbmi);
#if CONFIG_AOM_QM
const qm_val_t *iqmatrix = iqm[!ref][tx_size];
#endif
int band, c = 0;
const aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
const aom_prob *prob;
#if CONFIG_RANS || CONFIG_DAALA_EC
const aom_cdf_prob(*const coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
aom_cdf_prob(*coef_cdfs)[COEFF_CONTEXTS][ENTROPY_TOKENS] =
fc->coef_cdfs[tx_size][type][ref];
const aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
aom_cdf_prob(*cdf)[ENTROPY_TOKENS];
#endif  // CONFIG_RANS || CONFIG_DAALA_EC
unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int(*eob_branch_count)[COEFF_CONTEXTS];
......
@@ -232,6 +232,7 @@ static void encode_unsigned_max(struct aom_write_bit_buffer *wb, int data,
aom_wb_write_literal(wb, data, get_unsigned_bits(max));
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void prob_diff_update(const aom_tree_index *tree,
aom_prob probs[/*n - 1*/],
const unsigned int counts[/*n - 1*/], int n,
@@ -264,6 +265,7 @@ static int prob_diff_update_savings(const aom_tree_index *tree,
}
return savings;
}
#endif
static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
aom_writer *w) {
@@ -369,6 +371,7 @@ static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
}
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
FRAME_COUNTS *counts) {
int j;
@@ -440,6 +443,7 @@ static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
}
}
}
#endif
#if CONFIG_PALETTE
static void pack_palette_tokens(aom_writer *w, TOKENEXTRA **tp, int n,
@@ -559,8 +563,7 @@ static void pack_mb_tokens(aom_writer *w, TOKENEXTRA **tp,
}
static void write_segment_id(aom_writer *w, const struct segmentation *seg,
const struct segmentation_probs *segp,
int segment_id) {
struct segmentation_probs *segp, int segment_id) {
if (seg->enabled && seg->update_map) {
#if CONFIG_DAALA_EC
aom_write_symbol(w, segment_id, segp->tree_cdf, MAX_SEGMENTS);
@@ -756,7 +759,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
aom_writer *w) {
AV1_COMMON *const cm = &cpi->common;
#if !CONFIG_REF_MV
const nmv_context *nmvc = &cm->fc->nmvc;
nmv_context *nmvc = &cm->fc->nmvc;
#endif
#if CONFIG_DELTA_Q
@@ -767,7 +770,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
const MACROBLOCKD *const xd = &x->e_mbd;
#endif
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
struct segmentation_probs *const segp = &cm->fc->seg;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const PREDICTION_MODE mode = mbmi->mode;
@@ -897,7 +900,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], ref,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
@@ -915,7 +918,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
int nmv_ctx = av1_nmv_ctx(mbmi_ext->ref_mv_count[rf_type],
mbmi_ext->ref_mv_stack[rf_type], ref,
mbmi->ref_mv_idx);
const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
#endif
ref_mv = mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0];
av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, &ref_mv.as_mv, nmvc,
@@ -963,15 +966,15 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
}
#if CONFIG_DELTA_Q
static void write_mb_modes_kf(const AV1_COMMON *cm, MACROBLOCKD *xd,
static void write_mb_modes_kf(AV1_COMMON *cm, MACROBLOCKD *xd,
MODE_INFO **mi_8x8, aom_writer *w) {
int skip;
#else
static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
static void write_mb_modes_kf(AV1_COMMON *cm, const MACROBLOCKD *xd,
MODE_INFO **mi_8x8, aom_writer *w) {
#endif
const struct segmentation *const seg = &cm->seg;
const struct segmentation_probs *const segp = &cm->fc->seg;
struct segmentation_probs *const segp = &cm->fc->seg;
const MODE_INFO *const mi = mi_8x8[0];
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
@@ -1065,7 +1068,7 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
aom_writer *w, TOKENEXTRA **tok,
const TOKENEXTRA *const tok_end, int mi_row,
int mi_col) {
const AV1_COMMON *const cm = &cpi->common;
AV1_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
int plane;
@@ -1288,7 +1291,11 @@ static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
av1_coeff_probs_model *new_coef_probs) {
av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
const aom_prob upd = DIFF_UPDATE_PROB;
#if CONFIG_EC_ADAPT
const int entropy_nodes_update = ONE_TOKEN;
#else
const int entropy_nodes_update = UNCONSTRAINED_NODES;
#endif
int i, j, k, l, t;
int stepsize = cpi->sf.coeff_prob_appx_step;
#if CONFIG_TILE_GROUPS
@@ -1579,6 +1586,7 @@ static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
}
}
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
AV1_COMMON *cm = &cpi->common;
#if CONFIG_TILE_GROUPS
@@ -1607,6 +1615,7 @@ static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
cm->fc->seg.tree_cdf);
#endif
}
#endif // CONFIG_EC_ADAPT,CONFIG_DAALA_EC
static void write_txfm_mode(TX_MODE mode, struct aom_write_bit_buffer *wb) {
aom_wb_write_bit(wb, mode == TX_MODE_SELECT);
@@ -2150,6 +2159,7 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
FRAME_COUNTS *counts = cpi->td.counts;
aom_writer *header_bc;
int i, j;
#if CONFIG_TILE_GROUPS
const int probwt = cm->num_tg;
#else
@@ -2168,11 +2178,14 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#endif
update_txfm_probs(cm, header_bc, counts);
update_coef_probs(cpi, header_bc);
update_skip_probs(cm, header_bc, counts);
#if CONFIG_DELTA_Q
update_delta_q_probs(cm, header_bc, counts);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
update_seg_probs(cpi, header_bc);
for (i = 0; i < INTRA_MODES; ++i) {
@@ -2192,12 +2205,15 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
cm->fc->partition_cdf[i]);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
if (frame_is_intra_only(cm)) {
av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
#if CONFIG_DAALA_EC
av1_copy(cm->kf_y_cdf, av1_kf_y_mode_cdf);
#endif
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (i = 0; i < INTRA_MODES; ++i)
for (j = 0; j < INTRA_MODES; ++j) {
prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
@@ -2208,7 +2224,9 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
cm->kf_y_cdf[i][j]);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
} else {
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
#if CONFIG_REF_MV
update_inter_mode_probs(cm, header_bc, counts);
#else
@@ -2221,6 +2239,7 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#endif
}
#endif
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
#if CONFIG_MOTION_VAR
for (i = 0; i < BLOCK_SIZES; ++i)
if (is_motion_variation_allowed_bsize(i))
@@ -2228,8 +2247,10 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
counts->motion_mode[i], MOTION_MODES, probwt,
header_bc);
#endif // CONFIG_MOTION_VAR
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
if (cm->interp_filter == SWITCHABLE)
update_switchable_interp_probs(cm, header_bc, counts);
#endif
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
av1_cond_prob_diff_update(header_bc, &fc->intra_inter_prob[i],
@@ -2248,7 +2269,6 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
for (j = 0; j < (SINGLE_REFS - 1); j++)
av1_cond_prob_diff_update(header_bc, &fc->single_ref_prob[i][j],
counts->single_ref[i][j], probwt);
if (cm->reference_mode != SINGLE_REFERENCE)
#if CONFIG_EXT_REFS
for (i = 0; i < REF_CONTEXTS; i++) {
@@ -2265,6 +2285,7 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
counts->comp_ref[i], probwt);
#endif // CONFIG_EXT_REFS
#if !CONFIG_EC_ADAPT || !CONFIG_DAALA_EC
for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) {
prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
counts->y_mode[i], INTRA_MODES, probwt, header_bc);
@@ -2273,6 +2294,7 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
cm->fc->y_mode_cdf[i]);
#endif
}
#endif // CONFIG_EC_ADAPT, CONFIG_DAALA_EC
av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc,
#if CONFIG_REF_MV
@@ -2280,11 +2302,9 @@ static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#else
&counts->mv);
#endif
#if CONFIG_DAALA_EC
av1_tree_to_cdf(av1_mv_joint_tree, cm->fc->nmvc.joints,