/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#include "aom/aom_encoder.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/binary_codes_writer.h"
#include "aom_dsp/bitwriter_buffer.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem_ops.h"
#include "aom_ports/system_state.h"
#if CONFIG_BITSTREAM_DEBUG
#include "aom_util/debug_util.h"
#endif  // CONFIG_BITSTREAM_DEBUG

#include "av1/common/cdef.h"
#if CONFIG_CFL
#include "av1/common/cfl.h"
#endif
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
#include "av1/common/mvref_common.h"
#include "av1/common/odintrin.h"
#include "av1/common/pred_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/seg_common.h"
#include "av1/common/tile_common.h"

#include "av1/encoder/bitstream.h"
#include "av1/encoder/cost.h"
#include "av1/encoder/encodemv.h"
#if CONFIG_LV_MAP
#include "av1/encoder/encodetxb.h"
#endif  // CONFIG_LV_MAP
#include "av1/encoder/mcomp.h"
#include "av1/encoder/palette.h"
#include "av1/encoder/segmentation.h"
#include "av1/encoder/tokenize.h"

#define ENC_MISMATCH_DEBUG 0

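// Writes a value v in [0, n) with a quasi-uniform code: with
// l = get_unsigned_bits(n) and m = (1 << l) - n, the first m values take l - 1
// bits and the remaining ones take l bits. For example (assuming
// get_unsigned_bits(5) == 3), n = 5 gives m = 3, so v = 0..2 costs 2 bits and
// v = 3..4 costs 3 bits.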
static INLINE void write_uniform(aom_writer *w, int n, int v) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  if (l == 0) return;
  if (v < m) {
    aom_write_literal(w, v, l - 1);
  } else {
    aom_write_literal(w, m + ((v - m) >> 1), l - 1);
    aom_write_literal(w, (v - m) & 1, 1);
  }
}

#if CONFIG_LOOP_RESTORATION
static void loop_restoration_write_sb_coeffs(const AV1_COMMON *const cm,
                                             MACROBLOCKD *xd,
                                             const RestorationUnitInfo *rui,
                                             aom_writer *const w, int plane);
#endif  // CONFIG_LOOP_RESTORATION
#if CONFIG_OBU
static void write_uncompressed_header_obu(AV1_COMP *cpi,
#if CONFIG_EXT_TILE
                                          struct aom_write_bit_buffer *saved_wb,
#endif
                                          struct aom_write_bit_buffer *wb);
#else
static void write_uncompressed_header_frame(AV1_COMP *cpi,
                                            struct aom_write_bit_buffer *wb);
#endif

#if !CONFIG_OBU || CONFIG_EXT_TILE
static int remux_tiles(const AV1_COMMON *const cm, uint8_t *dst,
                       const uint32_t data_size, const uint32_t max_tile_size,
                       const uint32_t max_tile_col_size,
                       int *const tile_size_bytes,
                       int *const tile_col_size_bytes);
#endif

static void write_intra_mode_kf(FRAME_CONTEXT *frame_ctx, const MODE_INFO *mi,
                                const MODE_INFO *above_mi,
                                const MODE_INFO *left_mi, PREDICTION_MODE mode,
                                aom_writer *w) {
#if CONFIG_INTRABC
  assert(!is_intrabc_block(&mi->mbmi));
#endif  // CONFIG_INTRABC
  (void)mi;
  aom_write_symbol(w, mode, get_y_mode_cdf(frame_ctx, above_mi, left_mi),
                   INTRA_MODES);
}

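// Signals a single-reference inter mode as a cascade of binary symbols:
// NEWMV vs. not, then GLOBALMV vs. not, then NEARESTMV vs. the remaining
// modes, each with a context derived from mode_ctx.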
static void write_inter_mode(aom_writer *w, PREDICTION_MODE mode,
                             FRAME_CONTEXT *ec_ctx, const int16_t mode_ctx) {
  const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;

  aom_write_symbol(w, mode != NEWMV, ec_ctx->newmv_cdf[newmv_ctx], 2);

  if (mode != NEWMV) {
    const int16_t zeromv_ctx =
        (mode_ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
    aom_write_symbol(w, mode != GLOBALMV, ec_ctx->zeromv_cdf[zeromv_ctx], 2);

    if (mode != GLOBALMV) {
      int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;

      if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
      if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
      if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
      aom_write_symbol(w, mode != NEARESTMV, ec_ctx->refmv_cdf[refmv_ctx], 2);
    }
  }
}

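// Signals the dynamic reference list (DRL) index: for each eligible candidate
// position in the reference MV stack, a binary symbol tells whether the
// selected index stops at that position.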
static void write_drl_idx(FRAME_CONTEXT *ec_ctx, const MB_MODE_INFO *mbmi,
                          const MB_MODE_INFO_EXT *mbmi_ext, aom_writer *w) {
  uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);

  assert(mbmi->ref_mv_idx < 3);

  const int new_mv = mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV;
  if (new_mv) {
    int idx;
    for (idx = 0; idx < 2; ++idx) {
      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
        uint8_t drl_ctx =
            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);

        aom_write_symbol(w, mbmi->ref_mv_idx != idx, ec_ctx->drl_cdf[drl_ctx],
                         2);
        if (mbmi->ref_mv_idx == idx) return;
      }
    }
    return;
  }

  if (have_nearmv_in_inter_mode(mbmi->mode)) {
    int idx;
    // TODO(jingning): Temporary solution to compensate the NEARESTMV offset.
    for (idx = 1; idx < 3; ++idx) {
      if (mbmi_ext->ref_mv_count[ref_frame_type] > idx + 1) {
        uint8_t drl_ctx =
            av1_drl_ctx(mbmi_ext->ref_mv_stack[ref_frame_type], idx);
        aom_write_symbol(w, mbmi->ref_mv_idx != (idx - 1),
                         ec_ctx->drl_cdf[drl_ctx], 2);
        if (mbmi->ref_mv_idx == (idx - 1)) return;
      }
    }
    return;
  }
}

static void write_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
                                      aom_writer *w, PREDICTION_MODE mode,
                                      const int16_t mode_ctx) {
  assert(is_inter_compound_mode(mode));
  (void)cm;
  aom_write_symbol(w, INTER_COMPOUND_OFFSET(mode),
                   xd->tile_ctx->inter_compound_mode_cdf[mode_ctx],
                   INTER_COMPOUND_MODES);
}

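// Recursively signals the variable transform-size partition of an inter block:
// at each node one binary symbol indicates whether the current tx_size is kept
// or split into sub-transforms, which are then visited recursively until
// MAX_VARTX_DEPTH or TX_4X4 is reached.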
static void write_tx_size_vartx(const AV1_COMMON *cm, MACROBLOCKD *xd,
                                const MB_MODE_INFO *mbmi, TX_SIZE tx_size,
                                int depth, int blk_row, int blk_col,
                                aom_writer *w) {
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;
  const int tx_row = blk_row >> 1;
  const int tx_col = blk_col >> 1;
  const int max_blocks_high = max_block_high(xd, mbmi->sb_type, 0);
  const int max_blocks_wide = max_block_wide(xd, mbmi->sb_type, 0);

  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  if (depth == MAX_VARTX_DEPTH) {
    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    return;
  }

  int ctx = txfm_partition_context(xd->above_txfm_context + blk_col,
                                   xd->left_txfm_context + blk_row,
                                   mbmi->sb_type, tx_size);

  const int write_txfm_partition =
      tx_size == mbmi->inter_tx_size[tx_row][tx_col];
  if (write_txfm_partition) {
    aom_write_symbol(w, 0, ec_ctx->txfm_partition_cdf[ctx], 2);

    txfm_partition_update(xd->above_txfm_context + blk_col,
                          xd->left_txfm_context + blk_row, tx_size, tx_size);
    // TODO(yuec): set correct txfm partition update for qttx
  } else {
    const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];

    aom_write_symbol(w, 1, ec_ctx->txfm_partition_cdf[ctx], 2);

    if (sub_txs == TX_4X4) {
      txfm_partition_update(xd->above_txfm_context + blk_col,
                            xd->left_txfm_context + blk_row, sub_txs, tx_size);
      return;
    }

    assert(bsw > 0 && bsh > 0);
    for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh)
      for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) {
        int offsetr = blk_row + row;
        int offsetc = blk_col + col;
        write_tx_size_vartx(cm, xd, mbmi, sub_txs, depth + 1, offsetr, offsetc,
                            w);
      }
  }
}

static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                   aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;
  if (block_signals_txsize(bsize)) {
    const TX_SIZE tx_size = mbmi->tx_size;
    const int tx_size_ctx = get_tx_size_context(xd, 0);
    const int depth = tx_size_to_depth(tx_size, bsize, 0);
    const int max_depths = bsize_to_max_depth(bsize, 0);
    const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize, 0);

    assert(depth >= 0 && depth <= max_depths);
    assert(!is_inter_block(mbmi));
    assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));

    aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx],
                     max_depths + 1);
  }
}

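// Writes the skip flag unless segment-level skip makes it implicit; returns
// the (possibly implied) skip value.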
static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                      int segment_id, const MODE_INFO *mi, aom_writer *w) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    const int ctx = av1_get_skip_context(xd);
    FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
    aom_write_symbol(w, skip, ec_ctx->skip_cdfs[ctx], 2);
    return skip;
  }
}

#if CONFIG_EXT_SKIP
static int write_skip_mode(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                           int segment_id, const MODE_INFO *mi, aom_writer *w) {
  if (!cm->skip_mode_flag) return 0;
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 0;
  }
  const int skip_mode = mi->mbmi.skip_mode;
  if (!is_comp_ref_allowed(mi->mbmi.sb_type)) {
    assert(!skip_mode);
    return 0;
  }
  const int ctx = av1_get_skip_mode_context(xd);
  aom_write_symbol(w, skip_mode, xd->tile_ctx->skip_mode_cdfs[ctx], 2);
  return skip_mode;
}
#endif  // CONFIG_EXT_SKIP

static void write_is_inter(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                           int segment_id, aom_writer *w, const int is_inter) {
  if (!segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    const int ctx = av1_get_intra_inter_context(xd);
    FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
    aom_write_symbol(w, is_inter, ec_ctx->intra_inter_cdf[ctx], 2);
  }
}

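// Signals the motion mode. When only OBMC is available beyond simple
// translation, a binary symbol is used; otherwise the full motion-mode symbol
// is written. Nothing is coded if only SIMPLE_TRANSLATION is allowed.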
static void write_motion_mode(const AV1_COMMON *cm, MACROBLOCKD *xd,
                              const MODE_INFO *mi, aom_writer *w) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;

  MOTION_MODE last_motion_mode_allowed =
      motion_mode_allowed(cm->global_motion, xd, mi);
  switch (last_motion_mode_allowed) {
    case SIMPLE_TRANSLATION: break;
    case OBMC_CAUSAL:
      aom_write_symbol(w, mbmi->motion_mode == OBMC_CAUSAL,
                       xd->tile_ctx->obmc_cdf[mbmi->sb_type], 2);
      break;
    default:
      aom_write_symbol(w, mbmi->motion_mode,
                       xd->tile_ctx->motion_mode_cdf[mbmi->sb_type],
                       MOTION_MODES);
  }
}

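// Codes a delta-QP value: a symbol for min(|delta|, DELTA_Q_SMALL); for larger
// magnitudes, a 3-bit length field followed by the remainder bits; and a sign
// bit whenever the delta is nonzero.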
static void write_delta_qindex(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                               int delta_qindex, aom_writer *w) {
  int sign = delta_qindex < 0;
  int abs = sign ? -delta_qindex : delta_qindex;
  int rem_bits, thr;
  int smallval = abs < DELTA_Q_SMALL ? 1 : 0;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;

  aom_write_symbol(w, AOMMIN(abs, DELTA_Q_SMALL), ec_ctx->delta_q_cdf,
                   DELTA_Q_PROBS + 1);

  if (!smallval) {
    rem_bits = OD_ILOG_NZ(abs - 1) - 1;
    thr = (1 << rem_bits) + 1;
    aom_write_literal(w, rem_bits - 1, 3);
    aom_write_literal(w, abs - thr, rem_bits);
  }
  if (abs > 0) {
    aom_write_bit(w, sign);
  }
}

#if CONFIG_EXT_DELTA_Q
static void write_delta_lflevel(const AV1_COMMON *cm, const MACROBLOCKD *xd,
#if CONFIG_LOOPFILTER_LEVEL
                                int lf_id,
#endif
                                int delta_lflevel, aom_writer *w) {
  int sign = delta_lflevel < 0;
  int abs = sign ? -delta_lflevel : delta_lflevel;
  int rem_bits, thr;
  int smallval = abs < DELTA_LF_SMALL ? 1 : 0;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
  (void)cm;

#if CONFIG_LOOPFILTER_LEVEL
  if (cm->delta_lf_multi) {
    assert(lf_id >= 0 && lf_id < FRAME_LF_COUNT);
    aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL),
                     ec_ctx->delta_lf_multi_cdf[lf_id], DELTA_LF_PROBS + 1);
  } else {
    aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf,
                     DELTA_LF_PROBS + 1);
  }
#else
  aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf,
                   DELTA_LF_PROBS + 1);
#endif  // CONFIG_LOOPFILTER_LEVEL

  if (!smallval) {
    rem_bits = OD_ILOG_NZ(abs - 1) - 1;
    thr = (1 << rem_bits) + 1;
    aom_write_literal(w, rem_bits - 1, 3);
    aom_write_literal(w, abs - thr, rem_bits);
  }
  if (abs > 0) {
    aom_write_bit(w, sign);
  }
}
#endif  // CONFIG_EXT_DELTA_Q

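// Packs the palette color-index map: the first index is coded with
// write_uniform() and every subsequent index as a symbol with its per-position
// color-map CDF.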
static void pack_map_tokens(aom_writer *w, const TOKENEXTRA **tp, int n,
                            int num) {
  const TOKENEXTRA *p = *tp;
  write_uniform(w, n, p->token);  // The first color index.
  ++p;
  --num;
  for (int i = 0; i < num; ++i) {
    aom_write_symbol(w, p->token, p->color_map_cdf, n);
    ++p;
  }
  *tp = p;
}

#if !CONFIG_LV_MAP
static INLINE void write_coeff_extra(const aom_cdf_prob *const *cdf, int val,
                                     int n, aom_writer *w) {
  // Code the extra bits from LSB to MSB in groups of 4
  int i = 0;
  int count = 0;
  while (count < n) {
    const int size = AOMMIN(n - count, 4);
    const int mask = (1 << size) - 1;
    aom_write_cdf(w, val & mask, cdf[i++], 1 << size);
    val >>= size;
    count += size;
  }
}

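// Packs the coefficient tokens of one transform block for the pre-LV_MAP token
// set: an all-zero block is signalled with a single head symbol; otherwise each
// token writes a head symbol (or a ">1" flag at the last EOB position), an
// optional tail symbol, and any extra magnitude bits plus a sign bit.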
static void pack_mb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                           const TOKENEXTRA *const stop,
                           aom_bit_depth_t bit_depth, const TX_SIZE tx_size,
                           TOKEN_STATS *token_stats) {
  const TOKENEXTRA *p = *tp;
  int count = 0;
  const int seg_eob = av1_get_max_eob(tx_size);

  while (p < stop && p->token != EOSB_TOKEN) {
    const int token = p->token;
    const int8_t eob_val = p->eob_val;
    if (token == BLOCK_Z_TOKEN) {
      aom_write_symbol(w, 0, *p->head_cdf, HEAD_TOKENS + 1);
      p++;
      break;
    }

    const av1_extra_bit *const extra_bits = &av1_extra_bits[token];
    if (eob_val == LAST_EOB) {
      // Just code a flag indicating whether the value is >1 or 1.
      aom_write_bit(w, token != ONE_TOKEN);
    } else {
      int comb_symb = 2 * AOMMIN(token, TWO_TOKEN) - eob_val + p->first_val;
      aom_write_symbol(w, comb_symb, *p->head_cdf, HEAD_TOKENS + p->first_val);
    }
    if (token > ONE_TOKEN) {
      aom_write_symbol(w, token - TWO_TOKEN, *p->tail_cdf, TAIL_TOKENS);
    }

    if (extra_bits->base_val) {
      const int bit_string = p->extra;
      // Length of extra bits to be written excluding the sign bit.
      const int bit_string_length = extra_bits->len;
      const int is_cat6 = (extra_bits->base_val == CAT6_MIN_VAL);
      int skip_bits =
          is_cat6
              ? CAT6_BIT_SIZE - av1_get_cat6_extrabits_size(tx_size, bit_depth)
              : 0;

      assert(!(bit_string >> (bit_string_length - skip_bits + 1)));
      if (bit_string_length > 0)
        write_coeff_extra(extra_bits->cdf, bit_string >> 1,
                          bit_string_length - skip_bits, w);

      aom_write_bit_record(w, bit_string & 1, token_stats);
    }
    ++p;

    ++count;
    if (eob_val == EARLY_EOB || count == seg_eob) break;
  }

  *tp = p;
}
#endif  // !CONFIG_LV_MAP

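// Recursively descends the transform partition (mirroring write_tx_size_vartx)
// and writes the coefficients of each leaf transform block, via
// av1_write_coeffs_txb() here and via pack_mb_tokens() in the non-LV_MAP
// variant below.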
#if CONFIG_LV_MAP
static void pack_txb_tokens(aom_writer *w, AV1_COMMON *cm, MACROBLOCK *const x,
                            const TOKENEXTRA **tp,
                            const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
                            MB_MODE_INFO *mbmi, int plane,
                            BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
                            int block, int blk_row, int blk_col,
                            TX_SIZE tx_size, TOKEN_STATS *token_stats) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int tx_row = blk_row >> (1 - pd->subsampling_y);
  const int tx_col = blk_col >> (1 - pd->subsampling_x);
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);

  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  const TX_SIZE plane_tx_size =
      plane ? av1_get_uv_tx_size(mbmi, pd->subsampling_x, pd->subsampling_y)
            : mbmi->inter_tx_size[tx_row][tx_col];

  if (tx_size == plane_tx_size || plane) {
    TOKEN_STATS tmp_token_stats;
    init_token_stats(&tmp_token_stats);

    tran_low_t *tcoeff = BLOCK_OFFSET(x->mbmi_ext->tcoeff[plane], block);
    uint16_t eob = x->mbmi_ext->eobs[plane][block];
    TXB_CTX txb_ctx = { x->mbmi_ext->txb_skip_ctx[plane][block],
                        x->mbmi_ext->dc_sign_ctx[plane][block] };
    av1_write_coeffs_txb(cm, xd, w, blk_row, blk_col, plane, tx_size, tcoeff,
                         eob, &txb_ctx);
#if CONFIG_RD_DEBUG
    token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
    token_stats->cost += tmp_token_stats.cost;
#endif
  } else {
    const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];

    assert(bsw > 0 && bsh > 0);

    for (int r = 0; r < tx_size_high_unit[tx_size]; r += bsh) {
      for (int c = 0; c < tx_size_wide_unit[tx_size]; c += bsw) {
        const int offsetr = blk_row + r;
        const int offsetc = blk_col + c;
        const int step = bsh * bsw;

        if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;

        pack_txb_tokens(w, cm, x, tp, tok_end, xd, mbmi, plane, plane_bsize,
                        bit_depth, block, offsetr, offsetc, sub_txs,
                        token_stats);
        block += step;
      }
    }
  }
}
#else  // CONFIG_LV_MAP
static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
                            const TOKENEXTRA *const tok_end, MACROBLOCKD *xd,
                            MB_MODE_INFO *mbmi, int plane,
                            BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth,
                            int block, int blk_row, int blk_col,
                            TX_SIZE tx_size, TOKEN_STATS *token_stats) {
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int tx_row = blk_row >> (1 - pd->subsampling_y);
  const int tx_col = blk_col >> (1 - pd->subsampling_x);
  const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
  const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);

  if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;

  const TX_SIZE plane_tx_size =
      plane ? av1_get_uv_tx_size(mbmi, pd->subsampling_x, pd->subsampling_y)
            : mbmi->inter_tx_size[tx_row][tx_col];

  if (tx_size == plane_tx_size || plane) {
    TOKEN_STATS tmp_token_stats;
    init_token_stats(&tmp_token_stats);
    pack_mb_tokens(w, tp, tok_end, bit_depth, tx_size, &tmp_token_stats);
#if CONFIG_RD_DEBUG
    token_stats->txb_coeff_cost_map[blk_row][blk_col] = tmp_token_stats.cost;
    token_stats->cost += tmp_token_stats.cost;
#endif
  } else {
    const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
    const int bsw = tx_size_wide_unit[sub_txs];
    const int bsh = tx_size_high_unit[sub_txs];

    assert(bsw > 0 && bsh > 0);

    for (int r = 0; r < tx_size_high_unit[tx_size]; r += bsh) {
      for (int c = 0; c < tx_size_wide_unit[tx_size]; c += bsw) {
        const int offsetr = blk_row + r;
        const int offsetc = blk_col + c;
        const int step = bsh * bsw;

        if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;

        pack_txb_tokens(w, tp, tok_end, xd, mbmi, plane, plane_bsize, bit_depth,
                        block, offsetr, offsetc, sub_txs, token_stats);
        block += step;
      }
    }
  }
}
#endif  // CONFIG_LV_MAP
566
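// Maps a segment id x to a non-negative code by interleaving signed offsets
// around the predicted id "ref", so ids close to the prediction get small
// codes while staying within [0, max). For example, with ref = 3 and max = 8:
// x = 3 -> 0, x = 4 -> 1, x = 2 -> 2, x = 5 -> 3, x = 7 -> 7.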
#if CONFIG_SPATIAL_SEGMENTATION
567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589
static int neg_interleave(int x, int ref, int max) {
  const int diff = x - ref;
  if (!ref) return x;
  if (ref >= (max - 1)) return -diff;
  if (2 * ref < max) {
    if (abs(diff) <= ref) {
      if (diff > 0)
        return (diff << 1) - 1;
      else
        return ((-diff) << 1);
    }
    return x;
  } else {
    if (abs(diff) < (max - ref)) {
      if (diff > 0)
        return (diff << 1) - 1;
      else
        return ((-diff) << 1);
    }
    return (max - x) - 1;
  }
}

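// Spatial segmentation: predicts the segment id from the above-left, above and
// left neighbors, then codes the neg_interleave()d id with the CDF picked for
// that neighborhood. For skip blocks nothing is coded and the predicted id is
// stored directly.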
static void write_segment_id(AV1_COMP *cpi, const MB_MODE_INFO *const mbmi,
                             aom_writer *w, const struct segmentation *seg,
                             struct segmentation_probs *segp, int mi_row,
                             int mi_col, int skip) {
  AV1_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
  int prev_ul = -1; /* Top left segment_id */
  int prev_l = -1;  /* Current left segment_id */
  int prev_u = -1;  /* Current top segment_id */

  if (!seg->enabled || !seg->update_map) return;

  if ((xd->up_available) && (xd->left_available))
    prev_ul = get_segment_id(cm, cm->current_frame_seg_map, BLOCK_4X4,
                             mi_row - 1, mi_col - 1);

  if (xd->up_available)
    prev_u = get_segment_id(cm, cm->current_frame_seg_map, BLOCK_4X4,
                            mi_row - 1, mi_col - 0);

  if (xd->left_available)
    prev_l = get_segment_id(cm, cm->current_frame_seg_map, BLOCK_4X4,
                            mi_row - 0, mi_col - 1);

  int cdf_num = pick_spatial_seg_cdf(prev_ul, prev_u, prev_l);
  int pred = pick_spatial_seg_pred(prev_ul, prev_u, prev_l);

  if (skip) {
    set_spatial_segment_id(cm, cm->current_frame_seg_map, mbmi->sb_type, mi_row,
                           mi_col, pred);
    set_spatial_segment_id(cm, cpi->segmentation_map, mbmi->sb_type, mi_row,
                           mi_col, pred);
    /* mbmi is read only but we need to update segment_id */
    ((MB_MODE_INFO *)mbmi)->segment_id = pred;
    return;
  }

  int coded_id =
      neg_interleave(mbmi->segment_id, pred, cm->last_active_segid + 1);

  aom_cdf_prob *pred_cdf = segp->spatial_pred_seg_cdf[cdf_num];
  aom_write_symbol(w, coded_id, pred_cdf, 8);

  set_spatial_segment_id(cm, cm->current_frame_seg_map, mbmi->sb_type, mi_row,
                         mi_col, mbmi->segment_id);
}
#else
static void write_segment_id(aom_writer *w, const struct segmentation *seg,
                             struct segmentation_probs *segp, int segment_id) {
  if (seg->enabled && seg->update_map) {
    aom_write_symbol(w, segment_id, segp->tree_cdf, MAX_SEGMENTS);
  }
}
#endif

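// Helpers that write one binary reference-frame decision using the prediction
// CDF named by "pname" (with or without the AV1_COMMON argument).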
#define WRITE_REF_BIT(bname, pname) \
  aom_write_symbol(w, bname, av1_get_pred_cdf_##pname(cm, xd), 2)
#define WRITE_REF_BIT2(bname, pname) \
  aom_write_symbol(w, bname, av1_get_pred_cdf_##pname(xd), 2)

// This function encodes the reference frame
static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                             aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
           get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  }
#if CONFIG_SEGMENT_GLOBALMV
  else if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) ||
           segfeature_active(&cm->seg, segment_id, SEG_LVL_GLOBALMV))
#else
  else if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP))
#endif
  {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] == LAST_FRAME);
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      if (is_comp_ref_allowed(mbmi->sb_type))
        aom_write_symbol(w, is_compound, av1_get_reference_mode_cdf(cm, xd), 2);
    } else {
      assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
#if CONFIG_EXT_COMP_REFS
      const COMP_REFERENCE_TYPE comp_ref_type = has_uni_comp_refs(mbmi)
                                                    ? UNIDIR_COMP_REFERENCE
                                                    : BIDIR_COMP_REFERENCE;
      aom_write_symbol(w, comp_ref_type, av1_get_comp_reference_type_cdf(xd),
                       2);

      if (comp_ref_type == UNIDIR_COMP_REFERENCE) {
        const int bit = mbmi->ref_frame[0] == BWDREF_FRAME;
        WRITE_REF_BIT2(bit, uni_comp_ref_p);

        if (!bit) {
          assert(mbmi->ref_frame[0] == LAST_FRAME);
          const int bit1 = mbmi->ref_frame[1] == LAST3_FRAME ||
                           mbmi->ref_frame[1] == GOLDEN_FRAME;
          WRITE_REF_BIT2(bit1, uni_comp_ref_p1);
          if (bit1) {
            const int bit2 = mbmi->ref_frame[1] == GOLDEN_FRAME;
            WRITE_REF_BIT2(bit2, uni_comp_ref_p2);
          }
        } else {
          assert(mbmi->ref_frame[1] == ALTREF_FRAME);
        }

        return;
      }

      assert(comp_ref_type == BIDIR_COMP_REFERENCE);
#endif  // CONFIG_EXT_COMP_REFS

      const int bit = (mbmi->ref_frame[0] == GOLDEN_FRAME ||
                       mbmi->ref_frame[0] == LAST3_FRAME);
      WRITE_REF_BIT(bit, comp_ref_p);

      if (!bit) {
        const int bit1 = mbmi->ref_frame[0] == LAST2_FRAME;
        WRITE_REF_BIT(bit1, comp_ref_p1);
      } else {
        const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME;
        WRITE_REF_BIT(bit2, comp_ref_p2);
      }

      const int bit_bwd = mbmi->ref_frame[1] == ALTREF_FRAME;
      WRITE_REF_BIT(bit_bwd, comp_bwdref_p);

      if (!bit_bwd) {
        WRITE_REF_BIT(mbmi->ref_frame[1] == ALTREF2_FRAME, comp_bwdref_p1);
      }

    } else {
      const int bit0 = (mbmi->ref_frame[0] <= ALTREF_FRAME &&
                        mbmi->ref_frame[0] >= BWDREF_FRAME);
      WRITE_REF_BIT(bit0, single_ref_p1);

      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME;
        WRITE_REF_BIT(bit1, single_ref_p2);

        if (!bit1) {
          WRITE_REF_BIT(mbmi->ref_frame[0] == ALTREF2_FRAME, single_ref_p6);
        }
      } else {
        const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME ||
                          mbmi->ref_frame[0] == GOLDEN_FRAME);
        WRITE_REF_BIT(bit2, single_ref_p3);

        if (!bit2) {
          const int bit3 = mbmi->ref_frame[0] != LAST_FRAME;
          WRITE_REF_BIT(bit3, single_ref_p4);
        } else {
          const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME;
          WRITE_REF_BIT(bit4, single_ref_p5);
        }
      }
    }
  }
}
762
#if CONFIG_FILTER_INTRA
763
static void write_filter_intra_mode_info(const MACROBLOCKD *xd,
764 765
                                         const MB_MODE_INFO *const mbmi,
                                         aom_writer *w) {
766 767
  if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0 &&
      av1_filter_intra_allowed_txsize(mbmi->tx_size)) {
768
    aom_write_symbol(w, mbmi->filter_intra_mode_info.use_filter_intra,
769
                     xd->tile_ctx->filter_intra_cdfs[mbmi->tx_size], 2);
770
    if (mbmi->filter_intra_mode_info.use_filter_intra) {
771
      const FILTER_INTRA_MODE mode =
772
          mbmi->filter_intra_mode_info.filter_intra_mode;
773
      aom_write_symbol(w, mode, xd->tile_ctx->filter_intra_mode_cdf,
774
                       FILTER_INTRA_MODES);
hui su's avatar
hui su committed
775 776 777
    }
  }
}
778
#endif  // CONFIG_FILTER_INTRA
779

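// Signals the directional-intra angle deltas for luma and chroma, either as a
// symbol with a per-mode CDF (CONFIG_EXT_INTRA_MOD) or with write_uniform().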
static void write_intra_angle_info(const MACROBLOCKD *xd,
                                   FRAME_CONTEXT *const ec_ctx, aom_writer *w) {
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  if (!av1_use_angle_delta(bsize)) return;

  if (av1_is_directional_mode(mbmi->mode, bsize)) {
#if CONFIG_EXT_INTRA_MOD
    aom_write_symbol(w, mbmi->angle_delta[0] + MAX_ANGLE_DELTA,
                     ec_ctx->angle_delta_cdf[mbmi->mode - V_PRED],
                     2 * MAX_ANGLE_DELTA + 1);
#else
    (void)ec_ctx;
    write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
                  MAX_ANGLE_DELTA + mbmi->angle_delta[0]);
#endif  // CONFIG_EXT_INTRA_MOD
  }

  if (av1_is_directional_mode(get_uv_mode(mbmi->uv_mode), bsize)) {
#if CONFIG_EXT_INTRA_MOD
    aom_write_symbol(w, mbmi->angle_delta[1] + MAX_ANGLE_DELTA,
                     ec_ctx->angle_delta_cdf[mbmi->uv_mode - V_PRED],
                     2 * MAX_ANGLE_DELTA + 1);
#else
    write_uniform(w, 2 * MAX_ANGLE_DELTA + 1,
                  MAX_ANGLE_DELTA + mbmi->angle_delta[1]);
#endif
  }
}

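// Signals the interpolation filter(s) when the frame-level filter is
// SWITCHABLE and the block actually needs interpolation; with
// CONFIG_DUAL_FILTER the two filter directions are coded separately.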
static void write_mb_interp_filter(AV1_COMP *cpi, const MACROBLOCKD *xd,
                                   aom_writer *w) {
  AV1_COMMON *const cm = &cpi->common;
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

  if (!av1_is_interp_needed(xd)) {
    assert(mbmi->interp_filters ==
           av1_broadcast_interp_filter(
               av1_unswitchable_filter(cm->interp_filter)));
    return;
  }
  if (cm->interp_filter == SWITCHABLE) {
#if CONFIG_DUAL_FILTER
    int dir;
    for (dir = 0; dir < 2; ++dir) {
      if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
          (mbmi->ref_frame[1] > INTRA_FRAME &&
           has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
        const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
        InterpFilter filter =
            av1_extract_interp_filter(mbmi->interp_filters, dir);
        aom_write_symbol(w, filter, ec_ctx->switchable_interp_cdf[ctx],
                         SWITCHABLE_FILTERS);
        ++cpi->interp_filter_selected[0][filter];
      } else {
        assert(av1_extract_interp_filter(mbmi->interp_filters, dir) ==
               EIGHTTAP_REGULAR);
      }
    }
#else
    {
      const int ctx = av1_get_pred_context_switchable_interp(xd);
      InterpFilter filter = av1_extract_interp_filter(mbmi->interp_filters, 0);
      aom_write_symbol(w, filter, ec_ctx->switchable_interp_cdf[ctx],
                       SWITCHABLE_FILTERS);
      ++cpi->interp_filter_selected[0][filter];
    }
#endif  // CONFIG_DUAL_FILTER
  }
}
852 853 854 855 856 857 858
// Transmit color values with delta encoding. Write the first value as
// literal, and the deltas between each value and the previous one. "min_val" is
// the smallest possible value of the deltas.
static void delta_encode_palette_colors(const int *colors, int num,
                                        int bit_depth, int min_val,
                                        aom_writer *w) {
  if (num <= 0) return;
859
  assert(colors[0] < (1 << bit_depth));
860 861 862 863 864 865
  aom_write_literal(w, colors[0], bit_depth);
  if (num == 1) return;
  int max_delta = 0;
  int deltas[PALETTE_MAX_SIZE];
  memset(deltas, 0, sizeof(deltas));
  for (int i = 1; i < num; ++i) {
866
    assert(colors[i] < (1 << bit_depth));
867 868 869 870 871 872 873
    const int delta = colors[i] - colors[i - 1];
    deltas[i - 1] = delta;
    assert(delta >= min_val);
    if (delta > max_delta) max_delta = delta;
  }
  const int min_bits = bit_depth - 3;
  int bits = AOMMAX(av1_ceil_log2(max_delta + 1 - min_val), min_bits);
874
  assert(bits <= bit_depth);
875 876 877 878 879 880 881 882 883 884 885 886 887 888
  int range = (1 << bit_depth) - colors[0] - min_val;
  aom_write_literal(w, bits - min_bits, 2);
  for (int i = 0; i < num - 1; ++i) {
    aom_write_literal(w, deltas[i] - min_val, bits);
    range -= deltas[i];
    bits = AOMMIN(bits, av1_ceil_log2(range));
  }
}

// Transmit luma palette color values. First signal if each color in the color
// cache is used. Those colors that are not in the cache are transmitted with
// delta encoding.
static void write_palette_colors_y(const MACROBLOCKD *const xd,
                                   const PALETTE_MODE_INFO *const pmi,
                                   int bit_depth, aom_writer *w) {
  const int n = pmi->palette_size[0];
  uint16_t color_cache[2 * PALETTE_MAX_SIZE];
  const int n_cache = av1_get_palette_cache(xd, 0, color_cache);
  int out_cache_colors[PALETTE_MAX_SIZE];
  uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
  const int n_out_cache =
      av1_index_color_cache(color_cache, n_cache, pmi->palette_colors, n,
                            cache_color_found, out_cache_colors);
  int n_in_cache = 0;
  for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
    const int found = cache_color_found[i];
    aom_write_bit(w, found);
    n_in_cache += found;
  }
  assert(n_in_cache + n_out_cache == n);
  delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 1, w);
}

// Write chroma palette color values. The U channel is handled similarly to the
// luma channel. For the V channel, either use delta encoding or transmit raw
// values directly, whichever costs less.
static void write_palette_colors_uv(const MACROBLOCKD *const xd,
                                    const PALETTE_MODE_INFO *const pmi,
                                    int bit_depth, aom_writer *w) {
  const int n = pmi->palette_size[1];
  const uint16_t *colors_u = pmi->palette_colors + PALETTE_MAX_SIZE;
  const uint16_t *colors_v = pmi->palette_colors + 2 * PALETTE_MAX_SIZE;
  // U channel colors.
  uint16_t color_cache[2 * PALETTE_MAX_SIZE];
  const int n_cache = av1_get_palette_cache(xd, 1, color_cache);
  int out_cache_colors[PALETTE_MAX_SIZE];
  uint8_t cache_color_found[2 * PALETTE_MAX_SIZE];
  const int n_out_cache = av1_index_color_cache(
      color_cache, n_cache, colors_u, n, cache_color_found, out_cache_colors);
  int n_in_cache = 0;
  for (int i = 0; i < n_cache && n_in_cache < n; ++i) {
    const int found = cache_color_found[i];
    aom_write_bit(w, found);
    n_in_cache += found;
  }
  delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 0, w);

  // V channel colors. Don't use color cache as the colors are not sorted.
  const int max_val = 1 << bit_depth;
  int zero_count = 0, min_bits_v = 0;
  int bits_v =
      av1_get_palette_delta_bits_v(pmi, bit_depth, &zero_count, &min_bits_v);
  const int rate_using_delta =
      2 + bit_depth + (bits_v + 1) * (n - 1) - zero_count;
  const int rate_using_raw = bit_depth * n;
  if (rate_using_delta < rate_using_raw) {  // delta encoding
    assert(colors_v[0] < (1 << bit_depth));
    aom_write_bit(w, 1);
    aom_write_literal(w, bits_v - min_bits_v, 2);
    aom_write_literal(w, colors_v[0], bit_depth);
    for (int i = 1; i < n; ++i) {
      assert(colors_v[i] < (1 << bit_depth));
      if (colors_v[i] == colors_v[i - 1]) {  // No need to signal sign bit.
        aom_write_literal(w, 0, bits_v);
        continue;
      }
      const int delta = abs((int)colors_v[i] - colors_v[i - 1]);
      const int sign_bit = colors_v[i] < colors_v[i - 1];
      if (delta <= max_val - delta) {
        aom_write_literal(w, delta, bits_v);
        aom_write_bit(w, sign_bit);
      } else {
        aom_write_literal(w, max_val - delta, bits_v);
        aom_write_bit(w, !sign_bit);
      }
    }
  } else {  // Transmit raw values.
    aom_write_bit(w, 0);
    for (int i = 0; i < n; ++i) {
      assert(colors_v[i] < (1 << bit_depth));
      aom_write_literal(w, colors_v[i], bit_depth);
    }
  }
}

static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                    const MODE_INFO *const mi, int mi_row,
                                    int mi_col, aom_writer *w) {
  const int num_planes = av1_num_planes(cm);
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  assert(av1_allow_palette(cm->allow_screen_content_tools, bsize));
  const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
  const int bsize_ctx = av1_get_palette_bsize_ctx(bsize);

  if (mbmi->mode == DC_PRED) {
    const int n = pmi->palette_size[0];
    const int palette_y_mode_ctx = av1_get_palette_mode_ctx(xd);
    aom_write_symbol(
        w, n > 0,
        xd->tile_ctx->palette_y_mode_cdf[bsize_ctx][palette_y_mode_ctx], 2);
    if (n > 0) {
      aom_write_symbol(w, n - PALETTE_MIN_SIZE,
                       xd->tile_ctx->palette_y_size_cdf[bsize_ctx],
                       PALETTE_SIZES);
      write_palette_colors_y(xd, pmi, cm->bit_depth, w);
    }
  }

  const int uv_dc_pred =
      num_planes > 1 && mbmi->uv_mode == UV_DC_PRED &&
      is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
                          xd->plane[1].subsampling_y);
  if (uv_dc_pred) {
    const int n = pmi->palette_size[1];
    const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0);
    aom_write_symbol(w, n > 0,
                     xd->tile_ctx->palette_uv_mode_cdf[palette_uv_mode_ctx], 2);
    if (n > 0) {
      aom_write_symbol(w, n - PALETTE_MIN_SIZE,
                       xd->tile_ctx->palette_uv_size_cdf[bsize_ctx],
                       PALETTE_SIZES);
      write_palette_colors_uv(xd, pmi, cm->bit_depth, w);
    }
  }
}

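// Writes the transform type only when the extended-tx set for this block has
// more than one member, the effective qindex is nonzero, and the block is not
// (segment-)skipped; inter and intra blocks use separate CDF tables.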
void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
#if CONFIG_TXK_SEL
                       int blk_row, int blk_col, int plane, TX_SIZE tx_size,
#endif
                       aom_writer *w) {
  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
  const int is_inter = is_inter_block(mbmi);
#if !CONFIG_TXK_SEL
  const TX_SIZE mtx_size =
      get_max_rect_tx_size(xd->mi[0]->mbmi.sb_type, is_inter);
  const TX_SIZE tx_size =
      is_inter ? TXSIZEMAX(sub_tx_size_map[1][mtx_size], mbmi->min_tx_size)
               : mbmi->tx_size;
#endif  // !CONFIG_TXK_SEL
  FRAME_CONTEXT *ec_ctx = xd->tile_ctx;

#if !CONFIG_TXK_SEL
  TX_TYPE tx_type = mbmi->tx_type;
#else
  // Only y plane's tx_type is transmitted
  if (plane > 0) return;
  PLANE_TYPE plane_type = get_plane_type(plane);
  TX_TYPE tx_type = av1_get_tx_type(plane_type, xd, blk_row, blk_col, tx_size,
                                    cm->reduced_tx_set_used);
#endif

  const TX_SIZE square_tx_size = txsize_sqr_map[tx_size];
  const BLOCK_SIZE bsize = mbmi->sb_type;
  if (get_ext_tx_types(tx_size, bsize, is_inter, cm->reduced_tx_set_used) > 1 &&
      ((!cm->seg.enabled && cm->base_qindex > 0) ||
       (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) &&
      !mbmi->skip &&
      !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    const TxSetType tx_set_type =
        get_ext_tx_set_type(tx_size, bsize, is_inter, cm->reduced_tx_set_used);
    const int eset =
        get_ext_tx_set(tx_size, bsize, is_inter, cm->reduced_tx_set_used);
    // eset == 0 should correspond to a set with only DCT_DCT and there
    // is no need to send the tx_type
    assert(eset > 0);
    assert(av1_ext_tx_used[tx_set_type][tx_type]);
    if (is_inter) {
      aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
                       ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
                       av1_num_ext_tx_set[tx_set_type]);
    } else {
#if CONFIG_FILTER_INTRA
      PREDICTION_MODE intra_dir;
      if (mbmi->filter_intra_mode_info.use_filter_intra)
        intra_dir =
            fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode];
      else
        intra_dir = mbmi->mode;
      aom_write_symbol(
          w, av1_ext_tx_ind[tx_set_type][tx_type],
          ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][intra_dir],
          av1_num_ext_tx_set[tx_set_type]);
#else
      aom_write_symbol(
          w, av1_ext_tx_ind[tx_set_type][tx_type],
          ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
          av1_num_ext_tx_set[tx_set_type]);
#endif
    }
  }
}

static void write_intra_mode(FRAME_CONTEXT *frame_ctx, BLOCK_SIZE bsize,
                             PREDICTION_MODE mode, aom_writer *w) {
  aom_write_symbol(w, mode, frame_ctx->y_mode_cdf[size_group_lookup[bsize]],
                   INTRA_MODES);
}
static void write_intra_uv_mode(FRAME_CONTEXT *frame_ctx,
Luc Trudeau's avatar
Luc Trudeau committed
1086
                                UV_PREDICTION_MODE uv_mode,
1087 1088 1089
                                PREDICTION_MODE y_mode,
#if CONFIG_CFL
                                CFL_ALLOWED_TYPE cfl_allowed,
1090
#endif
1091 1092 1093 1094 1095 1096
                                aom_writer *w) {
#if CONFIG_CFL
  aom_write_symbol(w, uv_mode, frame_ctx->uv_mode_cdf[cfl_allowed][y_mode],
                   UV_INTRA_MODES - !cfl_allowed);
#else
  uv_mode = get_uv_mode(uv_mode);
1097
  aom_write_symbol(w, uv_mode, frame_ctx->uv_mode_cdf[y_mode], UV_INTRA_MODES);
1098
#endif
1099 1100
}

#if CONFIG_CFL