Commit 033e5368 authored by Timothy B. Terriberry, committed by Tim Terriberry

daala_ec: Remove non-dyadic functions.

Encoder output should not change, and all streams should remain
decodable without decoder changes.

Change-Id: Id1f1b0f2f02c3b46f150a93c451bf48abd0782ca
parent dd6fa06a
......@@ -54,8 +54,6 @@
aom_read_cdf_(r, cdf, nsymbs ACCT_STR_ARG(ACCT_STR_NAME))
#define aom_read_symbol(r, cdf, nsymbs, ACCT_STR_NAME) \
aom_read_symbol_(r, cdf, nsymbs ACCT_STR_ARG(ACCT_STR_NAME))
#define aom_read_cdf_unscaled(r, cdf, nsymbs, ACCT_STR_NAME) \
aom_read_cdf_unscaled_(r, cdf, nsymbs ACCT_STR_ARG(ACCT_STR_NAME))
#ifdef __cplusplus
extern "C" {
......@@ -238,24 +236,6 @@ static INLINE int aom_read_symbol_(aom_reader *r, aom_cdf_prob *cdf,
return ret;
}
#if CONFIG_PVQ
static INLINE int aom_read_cdf_unscaled_(aom_reader *r, const aom_cdf_prob *cdf,
int nsymbs ACCT_STR_PARAM) {
int ret;
#if CONFIG_DAALA_EC
ret = od_ec_decode_cdf_unscaled(&r->ec, cdf, nsymbs);
#else
#error "CONFIG_PVQ currently requires CONFIG_DAALA_EC."
#endif
#if CONFIG_ACCOUNTING
if (ACCT_STR_NAME) aom_process_accounting(r, ACCT_STR_NAME);
aom_update_symb_counts(r, (nsymbs == 2));
#endif
return ret;
}
#endif
static INLINE int aom_read_tree_as_cdf(aom_reader *r,
const aom_tree_index *tree,
const aom_prob *probs) {
......
......@@ -184,17 +184,6 @@ static INLINE void aom_write_symbol(aom_writer *w, int symb, aom_cdf_prob *cdf,
#endif
}
#if CONFIG_PVQ
static INLINE void aom_write_cdf_unscaled(aom_writer *w, int symb,
const aom_cdf_prob *cdf, int nsymbs) {
#if CONFIG_DAALA_EC
od_ec_encode_cdf_unscaled(&w->ec, symb, cdf, nsymbs);
#else
#error "CONFIG_PVQ currently requires CONFIG_DAALA_EC."
#endif
}
#endif
static INLINE void aom_write_tree_as_cdf(aom_writer *w,
const aom_tree_index *tree,
const aom_prob *probs, int bits,
......
......@@ -15,74 +15,12 @@
#include <stddef.h>
#include "av1/common/odintrin.h"
/*Set this flag to 1 to enable a "reduced overhead" version of the entropy coder.
This uses a partition function that more accurately follows the input
probability estimates at the expense of some additional CPU cost (though
still an order of magnitude less than a full division).
In classic arithmetic coding, the partition function maps a value x in the
range [0, ft] to a value y in [0, r] with 0 < ft <= r via
y = x*r/ft.
Any deviation from this value increases coding inefficiency.
To avoid divisions, we require ft <= r < 2*ft (enforcing it by shifting up
ft if necessary), and replace that function with
y = x + OD_MINI(x, r - ft).
This counts values of x smaller than r - ft double compared to values larger
than r - ft, which over-estimates the probability of symbols at the start of
the alphabet, and under-estimates the probability of symbols at the end of
the alphabet.
The overall coding inefficiency assuming accurate probability models and
independent symbols is in the 1% range, which is similar to that of CABAC.
To reduce overhead even further, we split this into two cases:
1) r - ft > ft - (r - ft).
That is, we have more values of x that are double-counted than
single-counted.
In this case, we still double-count the first 2*r - 3*ft values of x, but
after that we alternate between single-counting and double-counting for
the rest.
2) r - ft < ft - (r - ft).
That is, we have more values of x that are single-counted than
double-counted.
In this case, we alternate between single-counting and double-counting for
the first 2*(r - ft) values of x, and single-count the rest.
For two equiprobable symbols in different places in the alphabet, this
reduces the maximum ratio of over-estimation to under-estimation from 2:1
for the previous partition function to either 4:3 or 3:2 (for each of the
two cases above, respectively), assuming symbol probabilities significantly
greater than 1/32768.
That reduces the worst-case per-symbol overhead from 1 bit to 0.58 bits.
The resulting function is
e = OD_MAXI(2*r - 3*ft, 0);
y = x + OD_MINI(x, e) + OD_MINI(OD_MAXI(x - e, 0) >> 1, r - ft).
Here, e is a value that is greater than 0 in case 1, and 0 in case 2.
This function is about 3 times as expensive to evaluate as the high-overhead
version, but still an order of magnitude cheaper than a division, since it
is composed only of very simple operations.
Because we want to fit in 16-bit registers and must use unsigned values to do
so, we use saturating subtraction to implement the max-with-0 terms above.
Enabling this reduces the measured overhead in ectest from 0.805% to 0.621%
(vs. 0.022% for the division-based partition function with r much greater
than ft).
It improves performance on ntt-short-1 by about 0.3%.*/
#define OD_EC_REDUCED_OVERHEAD (1)
/*OPT: od_ec_window must be at least 32 bits, but if you have fast arithmetic
on a larger type, you can speed up the decoder by using it here.*/
typedef uint32_t od_ec_window;
#define OD_EC_WINDOW_SIZE ((int)sizeof(od_ec_window) * CHAR_BIT)
/*Unsigned subtraction with unsigned saturation.
This implementation of the macro is intentionally chosen to increase the
number of common subexpressions in the reduced-overhead partition function.
This matters for C code, but it would not for hardware with a saturating
subtraction instruction.*/
#define OD_SUBSATU(a, b) ((a)-OD_MINI(a, b))
/*The number of bits to use for the range-coded part of unsigned integers.*/
#define OD_EC_UINT_BITS (4)
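A minimal illustrative sketch of the two partition functions described above; the helper names are hypothetical and not part of this change, OD_MINI/OD_MAXI come from odintrin.h, and OD_SUBSATU is the macro defined just above:
/*Illustrative only: the simple and reduced-overhead partitions from the
  comment above, valid for ft <= r < 2*ft and 0 <= x <= ft.*/
static unsigned od_ec_part_simple_example(unsigned x, unsigned r, unsigned ft) {
  return x + OD_MINI(x, r - ft);
}
static unsigned od_ec_part_reduced_example(unsigned x, unsigned r, unsigned ft) {
  unsigned d = r - ft;
  unsigned e = OD_SUBSATU(2*d, ft); /*Equals OD_MAXI(2*r - 3*ft, 0).*/
  return x + OD_MINI(x, e) + OD_MINI(OD_SUBSATU(x, e) >> 1, d);
}
For example, with r = 60000, ft = 32768 and x = 24576, the ideal partition x*r/ft is 45000; the simple version returns 49152 while the reduced-overhead version returns 47712, i.e. a smaller deviation from the ideal split.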
......
......@@ -139,54 +139,7 @@ void od_ec_dec_init(od_ec_dec *dec, const unsigned char *buf,
od_ec_dec_refill(dec);
}
/*Decode a bit that has an fz/ft probability of being a zero.
fz: The probability that the bit is zero, scaled by ft.
ft: The total probability.
This must be at least 16384 and no more than 32768.
Return: The value decoded (0 or 1).*/
int od_ec_decode_bool(od_ec_dec *dec, unsigned fz, unsigned ft) {
od_ec_window dif;
od_ec_window vw;
unsigned r;
int s;
unsigned v;
int ret;
OD_ASSERT(0 < fz);
OD_ASSERT(fz < ft);
OD_ASSERT(16384 <= ft);
OD_ASSERT(ft <= 32768U);
dif = dec->dif;
r = dec->rng;
OD_ASSERT(dif >> (OD_EC_WINDOW_SIZE - 16) < r);
OD_ASSERT(ft <= r);
s = r - ft >= ft;
ft <<= s;
fz <<= s;
OD_ASSERT(r - ft < ft);
#if OD_EC_REDUCED_OVERHEAD
{
unsigned d;
unsigned e;
d = r - ft;
e = OD_SUBSATU(2 * d, ft);
v = fz + OD_MINI(fz, e) + OD_MINI(OD_SUBSATU(fz, e) >> 1, d);
}
#else
v = fz + OD_MINI(fz, r - ft);
#endif
vw = (od_ec_window)v << (OD_EC_WINDOW_SIZE - 16);
ret = dif >= vw;
if (ret) dif -= vw;
r = ret ? r - v : v;
return od_ec_dec_normalize(dec, dif, r, ret);
}
/*Decode a bit that has an fz probability of being a zero in Q15.
This is a simpler, lower overhead version of od_ec_decode_bool() for use when
ft == 32768.
To be decoded properly by this function, symbols cannot have been encoded by
od_ec_encode(), but must have been encoded with one of the equivalent _q15()
or _dyadic() functions instead.
fz: The probability that the bit is zero, scaled by 32768.
Return: The value decoded (0 or 1).*/
int od_ec_decode_bool_q15(od_ec_dec *dec, unsigned fz) {
......@@ -218,148 +171,7 @@ int od_ec_decode_bool_q15(od_ec_dec *dec, unsigned fz) {
return od_ec_dec_normalize(dec, dif, r_new, ret);
}
/*Decodes a symbol given a cumulative distribution function (CDF) table.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
The values must be monotonically non-decreasing, and cdf[nsyms - 1]
must be at least 16384, and no more than 32768.
nsyms: The number of symbols in the alphabet.
This should be at most 16.
Return: The decoded symbol s.*/
int od_ec_decode_cdf(od_ec_dec *dec, const uint16_t *cdf, int nsyms) {
od_ec_window dif;
unsigned r;
unsigned c;
unsigned d;
#if OD_EC_REDUCED_OVERHEAD
unsigned e;
#endif
int s;
unsigned u;
unsigned v;
unsigned q;
unsigned fl;
unsigned fh;
unsigned ft;
int ret;
dif = dec->dif;
r = dec->rng;
OD_ASSERT(dif >> (OD_EC_WINDOW_SIZE - 16) < r);
OD_ASSERT(nsyms > 0);
ft = cdf[nsyms - 1];
OD_ASSERT(16384 <= ft);
OD_ASSERT(ft <= 32768U);
OD_ASSERT(ft <= r);
s = r - ft >= ft;
ft <<= s;
d = r - ft;
OD_ASSERT(d < ft);
c = (unsigned)(dif >> (OD_EC_WINDOW_SIZE - 16));
q = OD_MAXI((int)(c >> 1), (int)(c - d));
#if OD_EC_REDUCED_OVERHEAD
e = OD_SUBSATU(2 * d, ft);
/*The correctness of this inverse partition function is not obvious, but it
was checked exhaustively for all possible values of r, ft, and c.
TODO: It should be possible to optimize this better than the compiler,
given that we do not care about the accuracy of negative results (as we
will not use them).
It would also be nice to get rid of the 32-bit dividend, as it requires a
32x32->64 bit multiply to invert.*/
q = OD_MAXI((int)q, (int)((2 * (int32_t)c + 1 - (int32_t)e) / 3));
#endif
q >>= s;
OD_ASSERT(q < ft >> s);
fl = 0;
ret = 0;
for (fh = cdf[ret]; fh <= q; fh = cdf[++ret]) fl = fh;
OD_ASSERT(fh <= ft >> s);
fl <<= s;
fh <<= s;
#if OD_EC_REDUCED_OVERHEAD
u = fl + OD_MINI(fl, e) + OD_MINI(OD_SUBSATU(fl, e) >> 1, d);
v = fh + OD_MINI(fh, e) + OD_MINI(OD_SUBSATU(fh, e) >> 1, d);
#else
u = fl + OD_MINI(fl, d);
v = fh + OD_MINI(fh, d);
#endif
r = v - u;
dif -= (od_ec_window)u << (OD_EC_WINDOW_SIZE - 16);
return od_ec_dec_normalize(dec, dif, r, ret);
}
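A hedged usage sketch of the removed decoder entry point; the wrapper and table below are illustrative only. For a 4-symbol alphabet with probabilities 1/2, 1/4, 1/8, 1/8, symbol s occupies [s > 0 ? cdf[s - 1] : 0, cdf[s]), so a value that scales back into [16384, 24576) decodes as s == 1:
/*Illustrative only: decode one symbol against a fixed 4-symbol CDF that meets
  the constraints above (monotonically non-decreasing, total in
  [16384, 32768]).*/
static int od_ec_decode_example_symbol(od_ec_dec *dec) {
  static const uint16_t cdf[4] = { 16384, 24576, 28672, 32768 };
  return od_ec_decode_cdf(dec, cdf, 4);
}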
/*Decodes a symbol given a cumulative distribution function (CDF) table.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
The values must be monotonically non-decreasing, and cdf[nsyms - 1]
must be at least 2, and no more than 32768.
nsyms: The number of symbols in the alphabet.
This should be at most 16.
Return: The decoded symbol s.*/
int od_ec_decode_cdf_unscaled(od_ec_dec *dec, const uint16_t *cdf, int nsyms) {
od_ec_window dif;
unsigned r;
unsigned c;
unsigned d;
#if OD_EC_REDUCED_OVERHEAD
unsigned e;
#endif
int s;
unsigned u;
unsigned v;
unsigned q;
unsigned fl;
unsigned fh;
unsigned ft;
int ret;
dif = dec->dif;
r = dec->rng;
OD_ASSERT(dif >> (OD_EC_WINDOW_SIZE - 16) < r);
OD_ASSERT(nsyms > 0);
ft = cdf[nsyms - 1];
OD_ASSERT(2 <= ft);
OD_ASSERT(ft <= 32768U);
s = 15 - OD_ILOG_NZ(ft - 1);
ft <<= s;
OD_ASSERT(ft <= r);
if (r - ft >= ft) {
ft <<= 1;
s++;
}
d = r - ft;
OD_ASSERT(d < ft);
c = (unsigned)(dif >> (OD_EC_WINDOW_SIZE - 16));
q = OD_MAXI((int)(c >> 1), (int)(c - d));
#if OD_EC_REDUCED_OVERHEAD
e = OD_SUBSATU(2 * d, ft);
/*TODO: See TODO above.*/
q = OD_MAXI((int)q, (int)((2 * (int32_t)c + 1 - (int32_t)e) / 3));
#endif
q >>= s;
OD_ASSERT(q < ft >> s);
fl = 0;
ret = 0;
for (fh = cdf[ret]; fh <= q; fh = cdf[++ret]) fl = fh;
OD_ASSERT(fh <= ft >> s);
fl <<= s;
fh <<= s;
#if OD_EC_REDUCED_OVERHEAD
u = fl + OD_MINI(fl, e) + OD_MINI(OD_SUBSATU(fl, e) >> 1, d);
v = fh + OD_MINI(fh, e) + OD_MINI(OD_SUBSATU(fh, e) >> 1, d);
#else
u = fl + OD_MINI(fl, d);
v = fh + OD_MINI(fh, d);
#endif
r = v - u;
dif -= (od_ec_window)u << (OD_EC_WINDOW_SIZE - 16);
return od_ec_dec_normalize(dec, dif, r, ret);
}
/*Decodes a symbol given a cumulative distribution function (CDF) table in Q15.
This is a simpler, lower overhead version of od_ec_decode_cdf() for use when
cdf[nsyms - 1] == 32768.
To be decoded properly by this function, symbols cannot have been encoded by
od_ec_encode(), but must have been encoded with one of the equivalent _q15()
or _dyadic() functions instead.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
The values must be monotonically non-decreasing, and cdf[nsyms - 1]
......
......@@ -63,20 +63,11 @@ struct od_ec_dec {
void od_ec_dec_init(od_ec_dec *dec, const unsigned char *buf, uint32_t storage)
OD_ARG_NONNULL(1) OD_ARG_NONNULL(2);
OD_WARN_UNUSED_RESULT int od_ec_decode_bool(od_ec_dec *dec, unsigned fz,
unsigned ft) OD_ARG_NONNULL(1);
OD_WARN_UNUSED_RESULT int od_ec_decode_bool_q15(od_ec_dec *dec, unsigned fz)
OD_ARG_NONNULL(1);
OD_WARN_UNUSED_RESULT int od_ec_decode_cdf(od_ec_dec *dec, const uint16_t *cdf,
int nsyms) OD_ARG_NONNULL(1)
OD_ARG_NONNULL(2);
OD_WARN_UNUSED_RESULT int od_ec_decode_cdf_q15(od_ec_dec *dec,
const uint16_t *cdf, int nsyms)
OD_ARG_NONNULL(1) OD_ARG_NONNULL(2);
OD_WARN_UNUSED_RESULT int od_ec_decode_cdf_unscaled(od_ec_dec *dec,
const uint16_t *cdf,
int nsyms) OD_ARG_NONNULL(1)
OD_ARG_NONNULL(2);
OD_WARN_UNUSED_RESULT uint32_t od_ec_dec_bits_(od_ec_dec *dec, unsigned ftb)
OD_ARG_NONNULL(1);
......
......@@ -142,67 +142,7 @@ void od_ec_enc_clear(od_ec_enc *enc) {
free(enc->buf);
}
/*Encodes a symbol given its scaled frequency information.
The frequency information must be discernable by the decoder, assuming it
has read only the previous symbols from the stream.
You can change the frequency information, or even the entire source alphabet,
so long as the decoder can tell from the context of the previously encoded
information that it is supposed to do so as well.
fl: The cumulative frequency of all symbols that come before the one to be
encoded.
fh: The cumulative frequency of all symbols up to and including the one to
be encoded.
Together with fl, this defines the range [fl, fh) in which the decoded
value will fall.
ft: The sum of the frequencies of all the symbols.
This must be at least 16384, and no more than 32768.*/
static void od_ec_encode(od_ec_enc *enc, unsigned fl, unsigned fh,
unsigned ft) {
od_ec_window l;
unsigned r;
int s;
unsigned d;
unsigned u;
unsigned v;
OD_ASSERT(fl < fh);
OD_ASSERT(fh <= ft);
OD_ASSERT(16384 <= ft);
OD_ASSERT(ft <= 32768U);
l = enc->low;
r = enc->rng;
OD_ASSERT(ft <= r);
s = r - ft >= ft;
ft <<= s;
fl <<= s;
fh <<= s;
d = r - ft;
OD_ASSERT(d < ft);
#if OD_EC_REDUCED_OVERHEAD
{
unsigned e;
e = OD_SUBSATU(2 * d, ft);
u = fl + OD_MINI(fl, e) + OD_MINI(OD_SUBSATU(fl, e) >> 1, d);
v = fh + OD_MINI(fh, e) + OD_MINI(OD_SUBSATU(fh, e) >> 1, d);
}
#else
u = fl + OD_MINI(fl, d);
v = fh + OD_MINI(fh, d);
#endif
r = v - u;
l += u;
od_ec_enc_normalize(enc, l, r);
#if OD_MEASURE_EC_OVERHEAD
enc->entropy -= OD_LOG2((double)(fh - fl) / ft);
enc->nb_symbols++;
#endif
}
/*Encodes a symbol given its frequency in Q15.
This is like od_ec_encode() when ft == 32768, but is simpler and has lower
overhead.
Symbols encoded with this function cannot be properly decoded with
od_ec_decode(), and must be decoded with one of the equivalent _q15()
functions instead.
fl: The cumulative frequency of all symbols that come before the one to be
encoded.
fh: The cumulative frequency of all symbols up to and including the one to
......@@ -239,73 +179,7 @@ static void od_ec_encode_q15(od_ec_enc *enc, unsigned fl, unsigned fh) {
#endif
}
/*Encodes a symbol given its frequency information with an arbitrary scale.
This operates just like od_ec_encode(), but does not require that ft be at
least 16384.
fl: The cumulative frequency of all symbols that come before the one to be
encoded.
fh: The cumulative frequency of all symbols up to and including the one to
be encoded.
ft: The sum of the frequencies of all the symbols.
This must be at least 2 and no more than 32768.*/
static void od_ec_encode_unscaled(od_ec_enc *enc, unsigned fl, unsigned fh,
unsigned ft) {
int s;
OD_ASSERT(fl < fh);
OD_ASSERT(fh <= ft);
OD_ASSERT(2 <= ft);
OD_ASSERT(ft <= 32768U);
s = 15 - OD_ILOG_NZ(ft - 1);
od_ec_encode(enc, fl << s, fh << s, ft << s);
}
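A worked example of the scaling step above, with values chosen purely for illustration:
/*Illustrative only: for ft == 100, OD_ILOG_NZ(99) is 7, so s == 15 - 7 == 8
  and ft << s == 25600, which lies in the [16384, 32768] range required by
  od_ec_encode(); fl and fh are shifted by the same amount, so the fl/ft and
  fh/ft ratios are preserved.*/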
/*Encode a bit that has an fz/ft probability of being a zero.
val: The value to encode (0 or 1).
fz: The probability that val is zero, scaled by ft.
ft: The total probability.
This must be at least 16384 and no more than 32768.*/
void od_ec_encode_bool(od_ec_enc *enc, int val, unsigned fz, unsigned ft) {
od_ec_window l;
unsigned r;
int s;
unsigned v;
OD_ASSERT(0 < fz);
OD_ASSERT(fz < ft);
OD_ASSERT(16384 <= ft);
OD_ASSERT(ft <= 32768U);
l = enc->low;
r = enc->rng;
OD_ASSERT(ft <= r);
s = r - ft >= ft;
ft <<= s;
fz <<= s;
OD_ASSERT(r - ft < ft);
#if OD_EC_REDUCED_OVERHEAD
{
unsigned d;
unsigned e;
d = r - ft;
e = OD_SUBSATU(2 * d, ft);
v = fz + OD_MINI(fz, e) + OD_MINI(OD_SUBSATU(fz, e) >> 1, d);
}
#else
v = fz + OD_MINI(fz, r - ft);
#endif
if (val) l += v;
r = val ? r - v : v;
od_ec_enc_normalize(enc, l, r);
#if OD_MEASURE_EC_OVERHEAD
enc->entropy -= OD_LOG2((double)(val ? ft - fz : fz) / ft);
enc->nb_symbols++;
#endif
}
/*Encode a bit that has an fz probability of being a zero in Q15.
This is a simpler, lower overhead version of od_ec_encode_bool() for use when
ft == 32768.
Symbols encoded with this function cannot be properly decoded with
od_ec_decode(), and must be decoded with one of the equivalent _q15()
functions instead.
val: The value to encode (0 or 1).
fz: The probability that val is zero, scaled by 32768.*/
void od_ec_encode_bool_q15(od_ec_enc *enc, int val, unsigned fz) {
......@@ -331,26 +205,7 @@ void od_ec_encode_bool_q15(od_ec_enc *enc, int val, unsigned fz) {
#endif
}
/*Encodes a symbol given a cumulative distribution function (CDF) table.
s: The index of the symbol to encode.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
The values must be monotonically non-decreasing, and the last value
must be at least 16384, and no more than 32768.
nsyms: The number of symbols in the alphabet.
This should be at most 16.*/
void od_ec_encode_cdf(od_ec_enc *enc, int s, const uint16_t *cdf, int nsyms) {
OD_ASSERT(s >= 0);
OD_ASSERT(s < nsyms);
od_ec_encode(enc, s > 0 ? cdf[s - 1] : 0, cdf[s], cdf[nsyms - 1]);
}
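A hedged encoder-side sketch (the wrapper and table are illustrative, not part of the tree); the call below expands to od_ec_encode(enc, s > 0 ? cdf[s - 1] : 0, cdf[s], 32768), so symbol 1, for instance, is coded as the interval [16384, 24576) out of a total of 32768:
/*Illustrative only: encode symbol s (0..3) from a fixed 4-symbol CDF.*/
static void od_ec_encode_example_symbol(od_ec_enc *enc, int s) {
  static const uint16_t cdf[4] = { 16384, 24576, 28672, 32768 };
  od_ec_encode_cdf(enc, s, cdf, 4);
}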
/*Encodes a symbol given a cumulative distribution function (CDF) table in Q15.
This is a simpler, lower overhead version of od_ec_encode_cdf() for use when
cdf[nsyms - 1] == 32768.
Symbols encoded with this function cannot be properly decoded with
od_ec_decode(), and must be decoded with one of the equivalent _q15()
functions instead.
s: The index of the symbol to encode.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
......@@ -367,21 +222,6 @@ void od_ec_encode_cdf_q15(od_ec_enc *enc, int s, const uint16_t *cdf,
od_ec_encode_q15(enc, s > 0 ? cdf[s - 1] : 0, cdf[s]);
}
/*Encodes a symbol given a cumulative distribution function (CDF) table.
s: The index of the symbol to encode.
cdf: The CDF, such that symbol s falls in the range
[s > 0 ? cdf[s - 1] : 0, cdf[s]).
The values must be monotonically non-decreasing, and the last value
must be at least 2, and no more than 32768.
nsyms: The number of symbols in the alphabet.
This should be at most 16.*/
void od_ec_encode_cdf_unscaled(od_ec_enc *enc, int s, const uint16_t *cdf,
int nsyms) {
OD_ASSERT(s >= 0);
OD_ASSERT(s < nsyms);
od_ec_encode_unscaled(enc, s > 0 ? cdf[s - 1] : 0, cdf[s], cdf[nsyms - 1]);
}
#if CONFIG_RAWBITS
/*Encodes a sequence of raw bits in the stream.
fl: The bits to encode.
......
......@@ -62,16 +62,10 @@ void od_ec_enc_init(od_ec_enc *enc, uint32_t size) OD_ARG_NONNULL(1);
void od_ec_enc_reset(od_ec_enc *enc) OD_ARG_NONNULL(1);
void od_ec_enc_clear(od_ec_enc *enc) OD_ARG_NONNULL(1);
void od_ec_encode_bool(od_ec_enc *enc, int val, unsigned fz, unsigned ft)
OD_ARG_NONNULL(1);
void od_ec_encode_bool_q15(od_ec_enc *enc, int val, unsigned fz_q15)
OD_ARG_NONNULL(1);
void od_ec_encode_cdf(od_ec_enc *enc, int s, const uint16_t *cdf, int nsyms)
OD_ARG_NONNULL(1) OD_ARG_NONNULL(3);
void od_ec_encode_cdf_q15(od_ec_enc *enc, int s, const uint16_t *cdf, int nsyms)
OD_ARG_NONNULL(1) OD_ARG_NONNULL(3);
void od_ec_encode_cdf_unscaled(od_ec_enc *enc, int s, const uint16_t *cdf,
int nsyms) OD_ARG_NONNULL(1) OD_ARG_NONNULL(3);
void od_ec_enc_bits(od_ec_enc *enc, uint32_t fl, unsigned ftb)
OD_ARG_NONNULL(1);
......
......@@ -64,12 +64,6 @@ void aom_cdf_adapt_q15(int val, uint16_t *cdf, int n, int *count, int rate);
void aom_encode_cdf_adapt_q15(aom_writer *w, int val, uint16_t *cdf, int n,
int *count, int rate);
void aom_encode_cdf_adapt(aom_writer *w, int val, uint16_t *cdf, int n,
int increment);
int aom_decode_cdf_adapt_(aom_reader *r, uint16_t *cdf, int n,
int increment ACCT_STR_PARAM);
void generic_encode(aom_writer *w, generic_encoder *model, int x,
int *ex_q16, int integration);
double generic_encode_cost(generic_encoder *model, int x, int *ex_q16);
......
......@@ -48,31 +48,6 @@ int aom_decode_cdf_adapt_q15_(aom_reader *r, uint16_t *cdf, int n,
return val;
}
/** Decodes a value from 0 to N-1 (with N up to 16) based on a cdf and adapts
* the cdf accordingly.
*
* @param [in,out] r range decoder
* @param [in,out] cdf CDF of the variable (Q15)
* @param [in] n number of values possible
* @param [in] increment adaptation speed (Q15)
*
* @retval decoded variable
*/
int aom_decode_cdf_adapt_(aom_reader *r, uint16_t *cdf, int n,
int increment ACCT_STR_PARAM) {
int i;
int val;
val = aom_read_cdf_unscaled(r, cdf, n, ACCT_STR_NAME);
if (cdf[n-1] + increment > 32767) {
for (i = 0; i < n; i++) {
/* Second term ensures that the pdf is non-null */
cdf[i] = (cdf[i] >> 1) + i + 1;
}
}
for (i = val; i < n; i++) cdf[i] += increment;
return val;
}
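A worked example of the adaptation rule above, with numbers chosen for illustration:
/*Illustrative only: with n == 3, cdf == {10000, 20000, 30000},
  increment == 4000 and a decoded val of 1, cdf[n - 1] + increment exceeds
  32767, so the table is first halved to {5001, 10002, 15003} (the "+ i + 1"
  term keeps every pdf bin non-zero) and then becomes {5001, 14002, 19003}
  after the increment is added from val onward.*/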
/** Encodes a random variable using a "generic" model, assuming that the
* distribution is one-sided (zero and up), has a single mode, and decays
* exponentially past the model.
......
......@@ -49,28 +49,6 @@ void aom_encode_cdf_adapt_q15(aom_writer *w, int val, uint16_t *cdf, int n,
aom_cdf_adapt_q15(val, cdf, n, count, rate);
}
/** Encodes a value from 0 to N-1 (with N up to 16) based on a cdf and adapts
* the cdf accordingly.
*