Commit d1893f64 authored by Alex Converse, committed by Pascal Massimino

Code DCT tokens with ANS

Change-Id: I452f9675325a5f45bfbbe3e7e135009a125539f1
parent 9ffcb469
This diff is collapsed.
......@@ -14,6 +14,9 @@
#include "vpx/vpx_integer.h"
#include "vpx_dsp/prob.h"
#if CONFIG_ANS
#include "vp10/common/ans.h"
#endif // CONFIG_ANS
#include "vp10/common/common.h"
#include "vp10/common/enums.h"
......@@ -163,6 +166,14 @@ static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
#if CONFIG_ANS
extern const vpx_prob
vp10_pareto8_token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2];
void vp10_build_pareto8_dec_tab(
const vpx_prob token_probs[COEFF_PROB_MODELS][ENTROPY_TOKENS - 2],
rans_dec_lut dec_tab[COEFF_PROB_MODELS]);
#endif // CONFIG_ANS
typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS]
[COEFF_CONTEXTS][UNCONSTRAINED_NODES];
......
This diff is collapsed.
......@@ -115,6 +115,9 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
cm->setup_mi = vp10_dec_setup_mi;
vp10_loop_filter_init(cm);
#if CONFIG_ANS
vp10_build_pareto8_dec_tab(vp10_pareto8_token_probs, pbi->token_tab);
#endif // CONFIG_ANS
cm->error.setjmp = 0;
......
......@@ -18,6 +18,9 @@
#include "vpx_scale/yv12config.h"
#include "vpx_util/vpx_thread.h"
#if CONFIG_ANS
#include "vp10/common/ans.h"
#endif
#include "vp10/common/thread_common.h"
#include "vp10/common/onyxc_int.h"
#include "vp10/common/ppflags.h"
......@@ -31,6 +34,9 @@ extern "C" {
typedef struct TileData {
VP10_COMMON *cm;
vpx_reader bit_reader;
#if CONFIG_ANS
struct AnsDecoder token_ans;
#endif // CONFIG_ANS
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]);
......@@ -40,6 +46,9 @@ typedef struct TileData {
typedef struct TileWorkerData {
struct VP10Decoder *pbi;
vpx_reader bit_reader;
#if CONFIG_ANS
struct AnsDecoder token_ans;
#endif // CONFIG_ANS
FRAME_COUNTS counts;
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
/* dqcoeff are shared by all the planes. So planes must be decoded serially */
......@@ -80,6 +89,9 @@ typedef struct VP10Decoder {
int inv_tile_order;
int need_resync; // wait for key/intra-only frame.
int hold_ref_buf; // hold the reference buffer.
#if CONFIG_ANS
rans_dec_lut token_tab[COEFF_PROB_MODELS];
#endif // CONFIG_ANS
} VP10Decoder;
int vp10_receive_compressed_data(struct VP10Decoder *pbi,
......
......@@ -11,6 +11,7 @@
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vp10/common/ans.h"
#include "vp10/common/blockd.h"
#include "vp10/common/common.h"
#include "vp10/common/entropy.h"
......@@ -38,6 +39,7 @@
++coef_counts[band][ctx][token]; \
} while (0)
#if !CONFIG_ANS
static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) {
int i, val = 0;
for (i = 0; i < n; ++i)
......@@ -207,6 +209,175 @@ static int decode_coefs(const MACROBLOCKD *xd,
return c;
}
#else // !CONFIG_ANS
// Reads an n-bit magnitude from the ANS decoder, most-significant bit
// first, coding each bit position with its own binary probability.
static INLINE int read_coeff(const vpx_prob *const probs, int n,
                             struct AnsDecoder *const ans) {
  int value = 0;
  int bit_idx = 0;
  while (bit_idx < n) {
    value <<= 1;
    value |= uabs_read(ans, probs[bit_idx]);
    ++bit_idx;
  }
  return value;
}
// Decodes one transform block's quantized coefficients from an ANS
// (asymmetric numeral system) stream.  EOB and ZERO decisions are read as
// binary uabs symbols from the model probabilities; the remaining token is
// read as a single rANS symbol from |token_tab|, which is expected to have
// been built from vp10_pareto8_token_probs by vp10_build_pareto8_dec_tab().
// Extra bits and the sign are read with uabs.  Returns the end-of-block
// position (number of coefficients decoded), or -1 on an unsupported bit
// depth.
static int decode_coefs_ans(const MACROBLOCKD *const xd,
const rans_dec_lut *const token_tab,
PLANE_TYPE type,
tran_low_t *dqcoeff, TX_SIZE tx_size,
const int16_t *dq,
int ctx, const int16_t *scan, const int16_t *nb,
struct AnsDecoder *const ans) {
// counts may be NULL when no stats are being collected.
FRAME_COUNTS *counts = xd->counts;
// 4x4 -> 16, 8x8 -> 64, 16x16 -> 256, 32x32 -> 1024 coefficients.
const int max_eob = 16 << (tx_size << 1);
const FRAME_CONTEXT *const fc = xd->fc;
const int ref = is_inter_block(&xd->mi[0]->mbmi);
int band, c = 0;
const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] =
fc->coef_probs[tx_size][type][ref];
const vpx_prob *prob;
// Only initialized when |counts| is non-NULL; presumably the
// INCREMENT_COUNT macro guards its uses on counts — macro body is not
// fully visible here, confirm against entropy decode macros.
unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
unsigned int (*eob_branch_count)[COEFF_CONTEXTS];
uint8_t token_cache[32 * 32];
const uint8_t *band_translate = get_band_translate(tx_size);
// 32x32 transforms carry one extra bit of dequantizer precision.
const int dq_shift = (tx_size == TX_32X32);
int v, token;
// DC dequantizer for the first coefficient; switched to dq[1] (AC) after.
int16_t dqv = dq[0];
const uint8_t *cat1_prob;
const uint8_t *cat2_prob;
const uint8_t *cat3_prob;
const uint8_t *cat4_prob;
const uint8_t *cat5_prob;
const uint8_t *cat6_prob;
if (counts) {
coef_counts = counts->coef[tx_size][type][ref];
eob_branch_count = counts->eob_branch[tx_size][type][ref];
}
// Select the extra-bit probability tables for the stream's bit depth.
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->bd > VPX_BITS_8) {
if (xd->bd == VPX_BITS_10) {
cat1_prob = vp10_cat1_prob_high10;
cat2_prob = vp10_cat2_prob_high10;
cat3_prob = vp10_cat3_prob_high10;
cat4_prob = vp10_cat4_prob_high10;
cat5_prob = vp10_cat5_prob_high10;
cat6_prob = vp10_cat6_prob_high10;
} else {
cat1_prob = vp10_cat1_prob_high12;
cat2_prob = vp10_cat2_prob_high12;
cat3_prob = vp10_cat3_prob_high12;
cat4_prob = vp10_cat4_prob_high12;
cat5_prob = vp10_cat5_prob_high12;
cat6_prob = vp10_cat6_prob_high12;
}
} else {
cat1_prob = vp10_cat1_prob;
cat2_prob = vp10_cat2_prob;
cat3_prob = vp10_cat3_prob;
cat4_prob = vp10_cat4_prob;
cat5_prob = vp10_cat5_prob;
cat6_prob = vp10_cat6_prob;
}
#else
cat1_prob = vp10_cat1_prob;
cat2_prob = vp10_cat2_prob;
cat3_prob = vp10_cat3_prob;
cat4_prob = vp10_cat4_prob;
cat5_prob = vp10_cat5_prob;
cat6_prob = vp10_cat6_prob;
#endif
while (c < max_eob) {
int val = -1;
band = *band_translate++;
prob = coef_probs[band][ctx];
if (counts)
++eob_branch_count[band][ctx];
// EOB decision: a 0 symbol here terminates the block.
if (!uabs_read(ans, prob[EOB_CONTEXT_NODE])) {
INCREMENT_COUNT(EOB_MODEL_TOKEN);
break;
}
// Consume a run of ZERO tokens before the next significant coefficient.
while (!uabs_read(ans, prob[ZERO_CONTEXT_NODE])) {
INCREMENT_COUNT(ZERO_TOKEN);
dqv = dq[1];
token_cache[scan[c]] = 0;
++c;
if (c >= max_eob)
return c; // zero tokens at the end (no eob token)
ctx = get_coef_context(nb, token_cache, c);
band = *band_translate++;
prob = coef_probs[band][ctx];
}
// The token choice (ONE..CAT6) is coded as one rANS symbol; the table
// is selected by the pivot-node model probability.
token = ONE_TOKEN + rans_read(ans, token_tab[prob[PIVOT_NODE] - 1]);
INCREMENT_COUNT(ONE_TOKEN + (token > ONE_TOKEN));
switch (token) {
case ONE_TOKEN:
case TWO_TOKEN:
case THREE_TOKEN:
case FOUR_TOKEN:
val = token;
break;
// Category tokens: magnitude = category minimum + coded extra bits.
case CATEGORY1_TOKEN:
val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, ans);
break;
case CATEGORY2_TOKEN:
val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, ans);
break;
case CATEGORY3_TOKEN:
val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, ans);
break;
case CATEGORY4_TOKEN:
val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, ans);
break;
case CATEGORY5_TOKEN:
val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, ans);
break;
case CATEGORY6_TOKEN:
{
// Larger transforms never code the top |skip_bits| CAT6 extra bits
// (the encoder asserts they are zero — see pack_mb_tokens_ans).
const int skip_bits = TX_SIZES - 1 - tx_size;
const uint8_t *cat6p = cat6_prob + skip_bits;
#if CONFIG_VP9_HIGHBITDEPTH
switch (xd->bd) {
case VPX_BITS_8:
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
break;
case VPX_BITS_10:
val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, ans);
break;
case VPX_BITS_12:
val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, ans);
break;
default:
assert(0);
return -1;
}
#else
val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, ans);
#endif
}
break;
}
// Dequantize the magnitude; the sign is read last as a raw uabs bit.
v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
#if CONFIG_VP9_HIGHBITDEPTH
dqcoeff[scan[c]] = highbd_check_range((uabs_read_bit(ans) ? -v : v),
xd->bd);
#else
dqcoeff[scan[c]] = check_range(uabs_read_bit(ans) ? -v : v);
#endif // CONFIG_VP9_HIGHBITDEPTH
#else
dqcoeff[scan[c]] = uabs_read_bit(ans) ? -v : v;
#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
// Record the energy class for entropy-context derivation of later
// coefficients.
token_cache[scan[c]] = vp10_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
dqv = dq[1];
}
return c;
}
#endif // !CONFIG_ANS
// TODO(slavarnway): Decode version of vp10_set_context. Modify vp10_set_context
// after testing is complete, then delete this version.
......@@ -280,18 +451,32 @@ void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
}
}
int vp10_decode_block_tokens(MACROBLOCKD *xd,
int plane, const scan_order *sc,
int x, int y,
TX_SIZE tx_size, vpx_reader *r,
int seg_id) {
// Decodes all coefficient tokens for one transform block of |plane| at
// block position (x, y), writing dequantized values to pd->dqcoeff and
// updating the above/left entropy contexts.  With CONFIG_ANS the tokens
// come from an AnsDecoder plus |token_tab| (per-model rANS decode tables);
// otherwise a vpx_reader tree coder is used.  Returns the end-of-block
// position.
int vp10_decode_block_tokens(MACROBLOCKD *const xd,
#if CONFIG_ANS
const rans_dec_lut *const token_tab,
#endif // CONFIG_ANS
int plane, const scan_order *sc,
int x, int y,
TX_SIZE tx_size,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
vpx_reader *r,
#endif // CONFIG_ANS
int seg_id) {
struct macroblockd_plane *const pd = &xd->plane[plane];
// Per-segment dequantizers (DC at [0], AC at [1]).
const int16_t *const dequant = pd->seg_dequant[seg_id];
// Entropy context derived from the above/left neighbor contexts.
const int ctx = get_entropy_context(tx_size, pd->above_context + x,
pd->left_context + y);
#if !CONFIG_ANS
const int eob = decode_coefs(xd, pd->plane_type,
pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
#else
const int eob = decode_coefs_ans(xd, token_tab, pd->plane_type,
pd->dqcoeff, tx_size,
dequant, ctx, sc->scan, sc->neighbors, r);
#endif // !CONFIG_ANS
// Propagate whether this block had any nonzero coefficients.
dec_set_contexts(xd, pd, tx_size, eob > 0, x, y);
return eob;
}
......
......@@ -12,8 +12,8 @@
#ifndef VP10_DECODER_DETOKENIZE_H_
#define VP10_DECODER_DETOKENIZE_H_
#include "vpx_dsp/bitreader.h"
#include "vp10/decoder/decoder.h"
#include "vp10/common/ans.h"
#include "vp10/common/scan.h"
#ifdef __cplusplus
......@@ -22,11 +22,19 @@ extern "C" {
void vp10_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
vpx_reader *r);
int vp10_decode_block_tokens(MACROBLOCKD *xd,
int plane, const scan_order *sc,
int x, int y,
TX_SIZE tx_size, vpx_reader *r,
int seg_id);
int vp10_decode_block_tokens(MACROBLOCKD *const xd,
#if CONFIG_ANS
const rans_dec_lut *const token_tab,
#endif // CONFIG_ANS
int plane, const scan_order *sc,
int x, int y,
TX_SIZE tx_size,
#if CONFIG_ANS
struct AnsDecoder *const r,
#else
vpx_reader *r,
#endif // CONFIG_ANS
int seg_id);
#ifdef __cplusplus
} // extern "C"
......
......@@ -402,6 +402,7 @@ static void update_supertx_probs(VP10_COMMON *cm, vpx_writer *w) {
}
#endif // CONFIG_SUPERTX
#if !CONFIG_ANS
static void pack_mb_tokens(vpx_writer *w,
TOKENEXTRA **tp, const TOKENEXTRA *const stop,
vpx_bit_depth_t bit_depth, const TX_SIZE tx) {
......@@ -486,6 +487,71 @@ static void pack_mb_tokens(vpx_writer *w,
*tp = p;
}
#else
// This function serializes the tokens backwards both in token order and
// bit order in each token.
// Rationale: ANS decodes in the reverse of encode order, so tokens are
// walked from |stop| back to |start|, and within each token the extra
// bits, the token symbol, the ZERO decision and the EOB decision are
// written in the reverse of the order decode_coefs_ans consumes them.
// EOSB_TOKEN entries carry the block's TX_SIZE in |extra| (smuggled there
// by vp10_tokenize_sb) so the encoder knows how many CAT6 extra bits to
// skip for each block.
static void pack_mb_tokens_ans(struct AnsCoder *const ans,
const TOKENEXTRA *const start,
const TOKENEXTRA *const stop,
vpx_bit_depth_t bit_depth) {
const TOKENEXTRA *p;
// Invalid until the first EOSB_TOKEN is seen (asserted below).
TX_SIZE tx_size = TX_SIZES;
for (p = stop - 1; p >= start; --p) {
const int t = p->token;
if (t == EOSB_TOKEN) {
// End-of-superblock marker: records the TX_SIZE of the tokens that
// precede it in the array (visited after it in this backwards walk).
tx_size = (TX_SIZE)p->extra;
} else {
#if CONFIG_VP9_HIGHBITDEPTH
const vp10_extra_bit *const b =
(bit_depth == VPX_BITS_12) ? &vp10_extra_bits_high12[t] :
(bit_depth == VPX_BITS_10) ? &vp10_extra_bits_high10[t] :
&vp10_extra_bits[t];
#else
const vp10_extra_bit *const b = &vp10_extra_bits[t];
(void) bit_depth;
#endif // CONFIG_VP9_HIGHBITDEPTH
if (t != EOB_TOKEN && t != ZERO_TOKEN) {
// Write extra bits first
const int e = p->extra;
const int l = b->len;
// CAT6 omits high-order extra bits for larger transforms; those
// bits must be zero (asserted below).
const int skip_bits = (t == CATEGORY6_TOKEN) ? TX_SIZES - 1 - tx_size : 0;
assert(tx_size < TX_SIZES);
// LSB of |extra| (the sign), coded with a flat probability; the
// decoder reads it last via uabs_read_bit.
uabs_write(ans, e & 1, 128);
if (l) {
const int v = e >> 1;
int n;
// Magnitude bits written LSB first; stream reversal makes the
// decoder's read_coeff see them MSB first.
for (n = 0; n < l - skip_bits; ++n) {
const int bb = (v >> n) & 1;
uabs_write(ans, bb, b->prob[l - 1 - n]);
}
for (; n < l; ++n) {
assert(((v >> n) & 1) == 0);
}
}
{
// The token itself as one rANS symbol; cum_prob is the sum of
// probabilities of the tokens preceding it in the alphabet.
struct rans_sym s;
int j;
const vpx_prob *token_probs =
vp10_pareto8_token_probs[p->context_tree[PIVOT_NODE] - 1];
s.cum_prob = 0;
for (j = ONE_TOKEN; j < t; ++j) {
s.cum_prob += token_probs[j - ONE_TOKEN];
}
s.prob = token_probs[t - ONE_TOKEN];
rans_write(ans, &s);
}
}
// ZERO decision, then (unless skipped) the EOB decision.
if (t != EOB_TOKEN)
uabs_write(ans, t != ZERO_TOKEN, p->context_tree[1]);
if (!p->skip_eob_node)
uabs_write(ans, t != EOB_TOKEN, p->context_tree[0]);
}
}
}
#endif // !CONFIG_ANS
#if CONFIG_VAR_TX
static void pack_txb_tokens(vpx_writer *w,
......@@ -973,6 +1039,11 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
MODE_INFO *m;
int plane;
#if CONFIG_ANS
(void) tok;
(void) tok_end;
(void) plane;
#endif // !CONFIG_ANS
xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
m = xd->mi[0];
......@@ -1008,6 +1079,7 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
if (supertx_enabled) return;
#endif // CONFIG_SUPERTX
#if !CONFIG_ANS
if (!m->mbmi.skip) {
assert(*tok < tok_end);
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
......@@ -1054,6 +1126,7 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
(*tok)++;
}
}
#endif
}
static void write_partition(const VP10_COMMON *const cm,
......@@ -1692,7 +1765,10 @@ static int get_refresh_mask(VP10_COMP *cpi) {
static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
unsigned int *max_tile_sz) {
VP10_COMMON *const cm = &cpi->common;
vpx_writer residual_bc;
vpx_writer mode_bc;
#if CONFIG_ANS
struct AnsCoder token_ans;
#endif
int tile_row, tile_col;
TOKENEXTRA *tok_end;
size_t total_size = 0;
......@@ -1710,32 +1786,49 @@ static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
for (tile_row = 0; tile_row < tile_rows; tile_row++) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
int tile_idx = tile_row * tile_cols + tile_col;
int put_tile_size = tile_col < tile_cols - 1 || tile_row < tile_rows - 1;
uint8_t *const mode_data_start =
data_ptr + total_size + (put_tile_size ? 4 : 0);
int token_section_size;
TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
tok_end = cpi->tile_tok[tile_row][tile_col] +
cpi->tok_count[tile_row][tile_col];
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
vpx_start_encode(&residual_bc, data_ptr + total_size + 4);
else
vpx_start_encode(&residual_bc, data_ptr + total_size);
vpx_start_encode(&mode_bc, mode_data_start);
#if !CONFIG_ANS
(void) token_section_size;
write_modes(cpi, &cpi->tile_data[tile_idx].tile_info,
&residual_bc, &tok, tok_end);
&mode_bc, &tok, tok_end);
assert(tok == tok_end);
vpx_stop_encode(&residual_bc);
if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
vpx_stop_encode(&mode_bc);
if (put_tile_size) {
unsigned int tile_sz;
// size of this tile
assert(residual_bc.pos > 0);
tile_sz = residual_bc.pos - 1;
assert(mode_bc.pos > 0);
tile_sz = mode_bc.pos - 1;
mem_put_le32(data_ptr + total_size, tile_sz);
max_tile = max_tile > tile_sz ? max_tile : tile_sz;
total_size += 4;
}
total_size += residual_bc.pos;
total_size += mode_bc.pos;
#else
write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &mode_bc,
NULL, NULL);
vpx_stop_encode(&mode_bc);
ans_write_init(&token_ans, mode_data_start + mode_bc.pos);
pack_mb_tokens_ans(&token_ans, tok, tok_end, cm->bit_depth);
token_section_size = ans_write_end(&token_ans);
if (put_tile_size) {
// size of this tile
mem_put_be32(data_ptr + total_size,
4 + mode_bc.pos + token_section_size);
total_size += 4;
}
total_size += mode_bc.pos + token_section_size;
#endif // !CONFIG_ANS
}
}
*max_tile_sz = max_tile;
......
......@@ -10,6 +10,7 @@
#include <assert.h>
#include "vp10/encoder/cost.h"
#include "vp10/common/entropy.h"
const unsigned int vp10_prob_cost[256] = {
2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161,
......@@ -51,6 +52,22 @@ static void cost(int *costs, vpx_tree tree, const vpx_prob *probs,
}
}
#if CONFIG_ANS
// Fills |costs| with the bit cost of each coefficient token under the ANS
// scheme: EOB and ZERO are binary decisions priced from |tree_probs|, and
// the remaining tokens are priced from the per-token probabilities in
// |token_probs|.  When |skip_eob| is nonzero, the cost of passing the EOB
// decision is excluded from the accumulated prefix (the EOB branch was
// already coded for this position).
void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
                          const vpx_prob *token_probs, int skip_eob) {
  int token;
  // Cost accumulated along the EOB/ZERO decision prefix.
  int prefix_cost = skip_eob ? 0 : vp10_cost_bit(tree_probs[0], 1);
  costs[EOB_TOKEN] = vp10_cost_bit(tree_probs[0], 0);
  costs[ZERO_TOKEN] = prefix_cost + vp10_cost_bit(tree_probs[1], 0);
  prefix_cost += vp10_cost_bit(tree_probs[1], 1);
  for (token = ONE_TOKEN; token <= CATEGORY6_TOKEN; ++token) {
    costs[token] =
        prefix_cost + vp10_cost_bit(token_probs[token - ONE_TOKEN], 0);
  }
}
#endif // CONFIG_ANS
void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree) {
cost(costs, tree, probs, 0, 0);
}
......
......@@ -48,6 +48,11 @@ static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs,
void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree);
void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree);
#if CONFIG_ANS
void vp10_cost_tokens_ans(int *costs, const vpx_prob *tree_probs,
const vpx_prob *token_probs, int skip_eob);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -136,12 +136,21 @@ static void fill_token_costs(vp10_coeff_cost *c,
for (j = 0; j < REF_TYPES; ++j)
for (k = 0; k < COEF_BANDS; ++k)
for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
#if CONFIG_ANS
const vpx_prob *const tree_probs = p[t][i][j][k][l];
vpx_prob pivot = tree_probs[PIVOT_NODE];
vp10_cost_tokens_ans((int *)c[t][i][j][k][0][l], tree_probs,
vp10_pareto8_token_probs[pivot - 1], 0);
vp10_cost_tokens_ans((int *)c[t][i][j][k][1][l], tree_probs,
vp10_pareto8_token_probs[pivot - 1], 1);
#else
vpx_prob probs[ENTROPY_NODES];
vp10_model_to_full_probs(p[t][i][j][k][l], probs);
vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs,
vp10_coef_tree);
vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
vp10_coef_tree);
#endif // CONFIG_ANS
assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
c[t][i][j][k][1][l][EOB_TOKEN]);
}
......
......@@ -431,11 +431,12 @@ const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
};
#endif
#if !CONFIG_ANS
// Tree-coder (value, length) codes for each of the ENTROPY_TOKENS
// coefficient tokens; compiled out when ANS replaces the tree coder.
const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
{2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7},
{125, 7}, {126, 7}, {127, 7}, {0, 1}
};
#endif // !CONFIG_ANS
struct tokenize_b_args {
VP10_COMP *cpi;
......@@ -783,6 +784,14 @@ void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
&arg);
(*t)->token = EOSB_TOKEN;
#if CONFIG_ANS
// TODO(aconverse): clip the number of bits in tokenize_b
// Smuggle TX_SIZE in the unused extrabits field so the ANS encoder
// knows the maximum number of extrabits to write at the end of the block
// (where it starts).
(*t)->extra = (EXTRABIT)(plane ? get_uv_tx_size(mbmi, &xd->plane[plane])
: mbmi->tx_size);
#endif // CONFIG_ANS
(*t)++;
}
} else {
......
......@@ -43,7 +43,9 @@ typedef struct {
extern const vpx_tree_index vp10_coef_tree[];
extern const vpx_tree_index vp10_coef_con_tree[];
#if !CONFIG_ANS
extern const struct vp10_token vp10_coef_encodings[];
#endif // !CONFIG_ANS
int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment