vp9_decodframe.c 45.2 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9
10
 */

11
#include <assert.h>
John Koleszar's avatar
John Koleszar committed
12

13
14
15
16
#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

Dmitry Kovalev's avatar
Dmitry Kovalev committed
17
#include "vp9/common/vp9_alloccommon.h"
Ronald S. Bultje's avatar
Ronald S. Bultje committed
18
#include "vp9/common/vp9_common.h"
Yaowu Xu's avatar
Yaowu Xu committed
19
#include "vp9/common/vp9_entropy.h"
20
#include "vp9/common/vp9_entropymode.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
21
#include "vp9/common/vp9_extend.h"
22
#include "vp9/common/vp9_idct.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
23
#include "vp9/common/vp9_pred_common.h"
24
#include "vp9/common/vp9_quant_common.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
25
26
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
27
28
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
29
30

#include "vp9/decoder/vp9_dboolhuff.h"
31
32
33
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
34
#include "vp9/decoder/vp9_dsubexp.h"
35
36
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
37
#include "vp9/decoder/vp9_thread.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
38
#include "vp9/decoder/vp9_treereader.h"
39

40
41
42
43
44
45
// Per-tile decoding state. Presumably one instance per worker thread when
// tile columns are decoded in parallel (vp9_thread.h is included above) —
// NOTE(review): the actual usage is not visible in this chunk; confirm.
typedef struct TileWorkerData {
  VP9_COMMON *cm;         // frame-level state shared by all tiles
  vp9_reader bit_reader;  // bool decoder positioned at this tile's data
  DECLARE_ALIGNED(16, MACROBLOCKD, xd);  // 16-byte-aligned block context
} TileWorkerData;

46
47
// Decode a 32-bit big-endian value from the byte stream.
static int read_be32(const uint8_t *p) {
  int value = 0;
  int i;
  for (i = 0; i < 4; ++i)
    value = (value << 8) | p[i];
  return value;
}

50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
// Compound prediction requires at least two references with differing
// sign bias; returns 1 when such a pair exists.
static int is_compound_prediction_allowed(const VP9_COMMON *cm) {
  int ref;
  for (ref = 1; ref < ALLOWED_REFS_PER_FRAME; ++ref) {
    if (cm->ref_frame_sign_bias[ref + 1] != cm->ref_frame_sign_bias[1])
      return 1;
  }
  return 0;
}

// Partition the three references into the two "variable" compound refs
// (matching sign bias) and the single "fixed" ref (the odd one out).
static void setup_compound_prediction(VP9_COMMON *cm) {
  const int last_eq_gld = cm->ref_frame_sign_bias[LAST_FRAME] ==
                          cm->ref_frame_sign_bias[GOLDEN_FRAME];
  const int last_eq_alt = cm->ref_frame_sign_bias[LAST_FRAME] ==
                          cm->ref_frame_sign_bias[ALTREF_FRAME];

  if (last_eq_gld) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (last_eq_alt) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

77
// Returns 1 when [start, start + len) lies entirely inside [start, end).
// len == 0 is not allowed.
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  // Compare against the remaining buffer size rather than forming
  // 'start + len': a large corrupt 'len' could push the pointer past the
  // end of the object, which is undefined behavior (CERT ARR30-C).
  return start < end && len != 0 && len <= (size_t)(end - start);
}

82
83
84
85
86
// Read an unsigned value coded in just enough bits to represent 'max',
// clamping the result to 'max'.
static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int value = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  if (value > max)
    return max;
  return value;
}

87
88
89
90
91
// Transform mode is coded in two bits; a third bit distinguishes
// ALLOW_32X32 from the mode one past it.
static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE mode = vp9_read_literal(r, 2);
  if (mode == ALLOW_32X32)
    mode += vp9_read_bit(r);
  return mode;
}

94
// Read per-context transform-size probability updates. The three passes
// (8x8, then 16x16, then 32x32) must stay separate: the bitstream codes
// them in this order.
static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int ctx, sz;

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 3; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p8x8[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 2; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p16x16[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 1; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p32x32[ctx][sz]);
}

110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
// Read updates to the switchable interpolation filter probabilities.
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int ctx, node;
  for (ctx = 0; ctx < SWITCHABLE_FILTERS + 1; ++ctx)
    for (node = 0; node < SWITCHABLE_FILTERS - 1; ++node)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[ctx][node]);
}

// Read updates to the inter-mode probabilities for each context.
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int ctx, node;
  for (ctx = 0; ctx < INTER_MODE_CONTEXTS; ++ctx)
    for (node = 0; node < INTER_MODES - 1; ++node)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[ctx][node]);
}

// Decode the compound prediction mode: one bit selects single-only (0);
// a second bit then distinguishes the remaining two modes (1 or 2).
static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
  COMPPREDMODE_TYPE mode = vp9_read_bit(r);
  return mode ? mode + vp9_read_bit(r) : mode;
}

// Read the frame's compound prediction mode and the associated
// probability updates (comp/inter, single-ref, comp-ref).
static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
  int ctx;
  const int compound_allowed = is_compound_prediction_allowed(cm);

  cm->comp_pred_mode = compound_allowed ? read_comp_pred_mode(r)
                                        : SINGLE_PREDICTION_ONLY;
  if (compound_allowed)
    setup_compound_prediction(cm);

  // Hybrid frames carry per-context comp-vs-single probabilities.
  if (cm->comp_pred_mode == HYBRID_PREDICTION)
    for (ctx = 0; ctx < COMP_INTER_CONTEXTS; ctx++)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[ctx]);

  // Single-reference probabilities are needed unless compound-only.
  if (cm->comp_pred_mode != COMP_PREDICTION_ONLY)
    for (ctx = 0; ctx < REF_CONTEXTS; ctx++) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[ctx][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[ctx][1]);
    }

  // Compound-reference probabilities are needed unless single-only.
  if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
    for (ctx = 0; ctx < REF_CONTEXTS; ctx++)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[ctx]);
}

// Conditionally update one MV probability: the 7 coded bits are mapped to
// an odd 8-bit probability ((v << 1) | 1), which keeps it nonzero.
static void update_mv(vp9_reader *r, vp9_prob *p) {
  if (vp9_read(r, NMV_UPDATE_PROB))
    *p = (vp9_read_literal(r, 7) << 1) | 1;
}

// Read motion-vector probability updates. The order of reads below must
// match the encoder's write order exactly, so the loop structure is
// preserved: joints, then per-component integer probs, then per-component
// fractional probs, then (optionally) high-precision probs.
static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) {
  int c, j, k;

  for (j = 0; j < MV_JOINTS - 1; ++j)
    update_mv(r, &mvc->joints[j]);

  for (c = 0; c < 2; ++c) {
    nmv_component *const comp = &mvc->comps[c];

    update_mv(r, &comp->sign);

    for (j = 0; j < MV_CLASSES - 1; ++j)
      update_mv(r, &comp->classes[j]);

    for (j = 0; j < CLASS0_SIZE - 1; ++j)
      update_mv(r, &comp->class0[j]);

    for (j = 0; j < MV_OFFSET_BITS; ++j)
      update_mv(r, &comp->bits[j]);
  }

  for (c = 0; c < 2; ++c) {
    nmv_component *const comp = &mvc->comps[c];

    for (j = 0; j < CLASS0_SIZE; ++j)
      for (k = 0; k < 3; ++k)
        update_mv(r, &comp->class0_fp[j][k]);

    for (j = 0; j < 3; ++j)
      update_mv(r, &comp->fp[j]);
  }

  // High-precision bits are only present when allowed for this frame.
  if (allow_hp) {
    for (c = 0; c < 2; ++c) {
      update_mv(r, &mvc->comps[c].class0_hp);
      update_mv(r, &mvc->comps[c].hp);
    }
  }
}

200
// Point each plane at its dequant table for the given q index:
// plane 0 is luma; all remaining planes share the chroma table.
static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
  int plane;
  xd->plane[0].dequant = cm->y_dequant[q_index];
  for (plane = 1; plane < MAX_MB_PLANE; plane++)
    xd->plane[plane].dequant = cm->uv_dequant[q_index];
}

208
209
210
211
212
// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int plane, tile_col;

  // One mode-info stream per tile column, pointing into the frame's mi
  // array at the column's starting offset.
  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams,
                              tile_cols * sizeof(*pbi->mi_streams)));
  for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
    TileInfo tile;
    vp9_tile_init(&tile, cm, 0, tile_col);
    pbi->mi_streams[tile_col] = &cm->mi[cm->mi_rows * tile.mi_col_start];
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8. All planes share one allocation;
  // planes 1..N index into the buffer of plane 0.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
    pbi->above_context[plane] = pbi->above_context[0] +
                                plane * sizeof(*pbi->above_context[0]) *
                                2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column bounds.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}

247
static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize,
248
                         TX_SIZE tx_size, void *arg) {
249
  MACROBLOCKD* const xd = arg;
250
  struct macroblockd_plane *const pd = &xd->plane[plane];
251
  int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block);
252
  const int stride = pd->dst.stride;
253
  const int eob = pd->eobs[block];
254
255
256
257
258
259
260
261
262
263
264
265
  if (eob > 0) {
    TX_TYPE tx_type;
    const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
                                                         block);
    uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
                                                   pd->dst.buf, stride);
    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(pd->plane_type, xd, raster_block);
        if (tx_type == DCT_DCT)
          xd->itxm_add(qcoeff, dst, stride, eob);
        else
266
          vp9_iht4x4_add(tx_type, qcoeff, dst, stride, eob);
267
268
269
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(pd->plane_type, xd);
270
        vp9_iht8x8_add(tx_type, qcoeff, dst, stride, eob);
271
272
273
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(pd->plane_type, xd);
274
        vp9_iht16x16_add(tx_type, qcoeff, dst, stride, eob);
275
276
277
        break;
      case TX_32X32:
        tx_type = DCT_DCT;
278
        vp9_idct32x32_add(qcoeff, dst, stride, eob);
279
280
281
282
283
284
        break;
      default:
        assert(!"Invalid transform size");
    }

    if (eob == 1) {
285
      vpx_memset(qcoeff, 0, 2 * sizeof(qcoeff[0]));
286
287
288
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(qcoeff, 0, 4 * (4 << tx_size) * sizeof(qcoeff[0]));
289
      else
290
        vpx_memset(qcoeff, 0, (16 << (tx_size << 1)) * sizeof(qcoeff[0]));
291
    }
292
293
294
  }
}

295
static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
296
                               TX_SIZE tx_size, void *arg) {
297
  MACROBLOCKD* const xd = arg;
298
  struct macroblockd_plane *const pd = &xd->plane[plane];
299
  MODE_INFO *const mi = xd->mi_8x8[0];
300
301
302
  const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
                                                       block);
  uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
303
                                                 pd->dst.buf, pd->dst.stride);
304
305
306
307
  const MB_PREDICTION_MODE mode = (plane == 0)
        ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[raster_block].as_mode
                                          : mi->mbmi.mode)
        : mi->mbmi.uv_mode;
308

309
  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
310
    extend_for_intra(xd, plane_bsize, plane, block, tx_size);
311

312
313
314
  vp9_predict_intra_block(xd, raster_block >> tx_size,
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride);
315

Dmitry Kovalev's avatar
Dmitry Kovalev committed
316
317
  if (!mi->mbmi.skip_coeff)
    decode_block(plane, block, plane_bsize, tx_size, arg);
318
319
}

320
321
// Decode the residual tokens for one block. Returns the total eob, or -1
// when the block is coded as skipped (in which case the skip contexts are
// reset instead).
static int decode_tokens(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                         BLOCK_SIZE bsize, vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip_coeff) {
    reset_skip_context(xd, bsize);
    return -1;
  }

  // With segmentation, the segment may override the frame base q index.
  if (cm->seg.enabled)
    setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                cm->base_qindex));

  // TODO(dkovalev) if (!vp9_reader_has_error(r))
  return vp9_decode_tokens(cm, xd, &cm->seg, r, bsize);
}

337
static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
James Zern's avatar
James Zern committed
338
                        const TileInfo *const tile,
339
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
Dmitry Kovalev's avatar
Dmitry Kovalev committed
340
341
342
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int offset = mi_row * cm->mode_info_stride + mi_col;
343

344
  xd->mode_info_stride = cm->mode_info_stride;
345
346
347
348
349

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;

  // we are using the mode info context stream here
350
  xd->mi_8x8[0] = xd->mi_stream;
351
  xd->mi_8x8[0]->mbmi.sb_type = bsize;
352
  ++xd->mi_stream;
353

354
355
  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
356
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;
357

358
  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);
Ronald S. Bultje's avatar
Ronald S. Bultje committed
359

360
361
  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
James Zern's avatar
James Zern committed
362
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
Ronald S. Bultje's avatar
Ronald S. Bultje committed
363

364
  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
Ronald S. Bultje's avatar
Ronald S. Bultje committed
365
}
John Koleszar's avatar
John Koleszar committed
366

367
368
// Attach reference-frame 'idx' (0 or 1) of the current block: validate its
// scale factors, set up the prediction source planes and propagate any
// corruption flag from the reference buffer.
static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
  const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];

  if (!vp9_is_valid_scale(sfc))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");

  xd->scale_factor[idx].sfc = sfc;
  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
  xd->corrupted |= cfg->corrupted;
}
John Koleszar's avatar
John Koleszar committed
381

382
static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
James Zern's avatar
James Zern committed
383
                           const TileInfo *const tile,
384
                           int mi_row, int mi_col,
385
                           vp9_reader *r, BLOCK_SIZE bsize, int index) {
386
  const int less8x8 = bsize < BLOCK_8X8;
387
  MB_MODE_INFO *mbmi;
388
  int eobtotal;
389

390
  if (less8x8)
391
    if (index > 0)
392
      return;
Dmitry Kovalev's avatar
Dmitry Kovalev committed
393

James Zern's avatar
James Zern committed
394
395
  set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);
396

397
  if (less8x8)
398
    bsize = BLOCK_8X8;
399
400

  // Has to be called after set_offsets
401
  mbmi = &xd->mi_8x8[0]->mbmi;
402
  eobtotal = decode_tokens(cm, xd, bsize, r);
403

404
  if (!is_inter_block(mbmi)) {
405
406
    // Intra reconstruction
    foreach_transformed_block(xd, bsize, decode_block_intra, xd);
407
  } else {
408
    // Inter reconstruction
409
410
411
412
413
    const int decode_blocks = (eobtotal > 0);

    if (!less8x8) {
      assert(mbmi->sb_type == bsize);
      if (eobtotal == 0)
414
        mbmi->skip_coeff = 1;  // skip loopfilter
415
    }
416

417
    set_ref(cm, xd, 0, mi_row, mi_col);
Dmitry Kovalev's avatar
Dmitry Kovalev committed
418
    if (has_second_ref(mbmi))
419
      set_ref(cm, xd, 1, mi_row, mi_col);
420

421
422
    xd->subpix.filter_x = xd->subpix.filter_y =
        vp9_get_filter_kernel(mbmi->interp_filter);
423
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
424
425
426

    if (decode_blocks)
      foreach_transformed_block(xd, bsize, decode_block, xd);
427
  }
428
  xd->corrupted |= vp9_reader_has_error(r);
429
430
}

431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
// Read the partition type for a block at (mi_row, mi_col). Blocks touching
// the frame edge have a restricted set of legal partitions.
static PARTITION_TYPE read_partition(int hbs, int mi_rows, int mi_cols,
                                     int mi_row, int mi_col,
                                     vp9_prob probs[PARTITION_TYPES - 1],
                                     vp9_reader *r) {
  const int has_rows = (mi_row + hbs) < mi_rows;
  const int has_cols = (mi_col + hbs) < mi_cols;

  if (has_rows && has_cols)
    return treed_read(r, vp9_partition_tree, probs);
  if (has_cols)  // bottom edge: only HORZ or SPLIT are possible
    return vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  if (has_rows)  // right edge: only VERT or SPLIT are possible
    return vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  return PARTITION_SPLIT;  // corner: split is the only option
}

448
static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
James Zern's avatar
James Zern committed
449
                            const TileInfo *const tile,
450
                            int mi_row, int mi_col,
451
                            vp9_reader* r, BLOCK_SIZE bsize, int index) {
Dmitry Kovalev's avatar
Dmitry Kovalev committed
452
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
453
  PARTITION_TYPE partition = PARTITION_NONE;
454
  BLOCK_SIZE subsize;
455

456
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
457
458
    return;

459
  if (bsize < BLOCK_8X8) {
460
    if (index > 0)
461
      return;
462
  } else {
463
464
465
466
467
    const int ctx = partition_plane_context(xd->above_seg_context,
                                            xd->left_seg_context,
                                            mi_row, mi_col, bsize);
    partition = read_partition(hbs, cm->mi_rows, cm->mi_cols, mi_row, mi_col,
                               cm->fc.partition_prob[cm->frame_type][ctx], r);
468

469
    if (!cm->frame_parallel_decoding_mode)
470
      ++cm->counts.partition[ctx][partition];
471
472
  }

473
  subsize = get_subsize(bsize, partition);
474

475
476
  switch (partition) {
    case PARTITION_NONE:
James Zern's avatar
James Zern committed
477
      decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, 0);
478
479
      break;
    case PARTITION_HORZ:
James Zern's avatar
James Zern committed
480
      decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, 0);
481
      if (mi_row + hbs < cm->mi_rows)
James Zern's avatar
James Zern committed
482
        decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize, 1);
483
484
      break;
    case PARTITION_VERT:
James Zern's avatar
James Zern committed
485
      decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize, 0);
486
      if (mi_col + hbs < cm->mi_cols)
James Zern's avatar
James Zern committed
487
        decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize, 1);
488
      break;
Dmitry Kovalev's avatar
Dmitry Kovalev committed
489
490
    case PARTITION_SPLIT: {
      int n;
491
      for (n = 0; n < 4; n++) {
Dmitry Kovalev's avatar
Dmitry Kovalev committed
492
        const int j = n >> 1, i = n & 1;
James Zern's avatar
James Zern committed
493
        decode_modes_sb(cm, xd, tile, mi_row + j * hbs, mi_col + i * hbs,
494
                        r, subsize, n);
495
      }
Dmitry Kovalev's avatar
Dmitry Kovalev committed
496
    } break;
497
    default:
498
      assert(!"Invalid partition type");
499
  }
500

501
  // update partition context
502
  if (bsize >= BLOCK_8X8 &&
503
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
504
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
505
                             mi_row, mi_col, subsize, bsize);
506
507
}

508
509
510
511
static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
512
                                vp9_reader *r) {
Dmitry Kovalev's avatar
Dmitry Kovalev committed
513
514
515
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
516
  if (!read_is_valid(data, read_size, data_end))
517
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
518
                       "Truncated packet or corrupt tile length");
John Koleszar's avatar
John Koleszar committed
519

520
  if (vp9_reader_init(r, data, read_size))
521
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
John Koleszar's avatar
John Koleszar committed
522
                       "Failed to allocate bool decoder %d", 1);
John Koleszar's avatar
John Koleszar committed
523
524
}

525
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
526
                                   vp9_reader *r) {
527
528
529
530
531
532
533
534
535
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++)
            if (k > 0 || l < 3)
              for (m = 0; m < UNCONSTRAINED_NODES; m++)
536
                vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
537
}
538

539
static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
540
                            vp9_reader *r) {
541
  read_coef_probs_common(fc->coef_probs[TX_4X4], r);
542

543
  if (tx_mode > ONLY_4X4)
544
    read_coef_probs_common(fc->coef_probs[TX_8X8], r);
Dmitry Kovalev's avatar
Dmitry Kovalev committed
545

546
  if (tx_mode > ALLOW_8X8)
547
    read_coef_probs_common(fc->coef_probs[TX_16X16], r);
Dmitry Kovalev's avatar
Dmitry Kovalev committed
548

549
  if (tx_mode > ALLOW_16X16)
550
    read_coef_probs_common(fc->coef_probs[TX_32X32], r);
551
552
}

553
554
static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
555
556
  int i, j;

557
558
  seg->update_map = 0;
  seg->update_data = 0;
559

560
561
  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
562
563
564
    return;

  // Segmentation map update
565
566
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
Paul Wilkins's avatar
Paul Wilkins committed
567
    for (i = 0; i < SEG_TREE_PROBS; i++)
568
569
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;
570

571
572
    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
573
      for (i = 0; i < PREDICTION_PROBS; i++)
574
575
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
576
577
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
578
        seg->pred_probs[i] = MAX_PROB;
579
    }
580
  }
581

582
  // Segmentation data update
583
584
585
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);
586

587
    vp9_clearall_segfeatures(seg);
588

Paul Wilkins's avatar
Paul Wilkins committed
589
    for (i = 0; i < MAX_SEGMENTS; i++) {
590
591
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
592
        const int feature_enabled = vp9_rb_read_bit(rb);
593
        if (feature_enabled) {
594
          vp9_enable_segfeature(seg, i, j);
595
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
596
          if (vp9_is_segfeature_signed(j))
597
            data = vp9_rb_read_bit(rb) ? -data : data;
598
        }
599
        vp9_set_segdata(seg, i, j, data);
600
601
602
603
604
      }
    }
  }
}

605
606
607
608
static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);
609
610
611

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
612
  lf->mode_ref_delta_update = 0;
613

614
615
616
617
  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
618
619
      int i;

620
621
      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
622
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
623

624
625
      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
626
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
627
628
629
630
    }
  }
}

631
632
// Read an optional 4-bit signed delta-q value (0 when absent).
// Returns nonzero when the value changed from its previous setting.
static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old_value = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return old_value != *delta_q;
}
636

637
638
// Parse the quantization parameters, rebuilding the dequant tables only
// when a delta actually changed, and select the 4x4 inverse transform
// (WHT for lossless, DCT otherwise).
static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  // Lossless coding requires base q and all deltas to be zero.
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}

656
657
658
659
660
661
static INTERPOLATION_TYPE read_interp_filter_type(
                              struct vp9_read_bit_buffer *rb) {
  const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
                                                 EIGHTTAP,
                                                 EIGHTTAP_SHARP,
                                                 BILINEAR };
662
  return vp9_rb_read_bit(rb) ? SWITCHABLE
663
                             : literal_to_type[vp9_rb_read_literal(rb, 2)];
664
665
}

666
// Read a frame size as two 16-bit fields, each coded minus one.
// Width is coded first.
static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  *width = vp9_rb_read_literal(rb, 16) + 1;
  *height = vp9_rb_read_literal(rb, 16) + 1;
}

674
// Display size defaults to the coded size; a flag bit signals that an
// explicit display size follows.
static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}
680

681
682
// Apply a new coded frame size: allocate buffers on first use, reject
// growth beyond the initial allocation, and (re)size the output buffer.
static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    if (!pbi->initial_width || !pbi->initial_height) {
      // First sized frame: allocate and remember the maximum dimensions.
      if (vp9_alloc_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
      pbi->initial_width = width;
      pbi->initial_height = height;
    } else {
      // Later frames may shrink but never exceed the initial allocation.
      if (width > pbi->initial_width)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame width too large");

      if (height > pbi->initial_height)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame height too large");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                           cm->subsampling_x, cm->subsampling_y,
                           VP9BORDERINPIXELS);
}

712
713
714
// Read and apply an explicitly coded frame size, then the display size.
static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;

  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

720
721
722
723
724
725
726
727
// Determine the frame size either by inheriting it from the first flagged
// reference frame or, when no reference is flagged, by reading it
// explicitly from the header.
static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  int width, height;
  int found = 0, i;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i);
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}

747
748
// Wire the macroblock context to the per-tile-column mi stream and the
// shared above contexts before decoding a tile.
static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_col) {
  int plane;

  xd->mi_stream = pbi->mi_streams[tile_col];

  for (plane = 0; plane < MAX_MB_PLANE; ++plane)
    xd->above_context[plane] = pbi->above_context[plane];

  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}

James Zern's avatar
James Zern committed
759
760
// Decode all superblocks of one tile. When inline loop filtering is
// enabled the filter runs one superblock row behind decoding, optionally
// on the loop-filter worker thread; the final rows are filtered after the
// tile completes.
static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
                        vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64, 0);

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // delay the loopfilter by 1 macroblock row.
      if (lf_start < 0) continue;

      // decoding has completed: finish up the loop filter in this thread.
      if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;

      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}

816
// Parse the tile layout: column count is unary-coded above the minimum
// log2 value; row count is 0, 1 or 2 bits.
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

James Zern's avatar
James Zern committed
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static size_t get_tile(const uint8_t *const data_end,
                       int is_last,
                       struct vpx_internal_error_info *error_info,
                       const uint8_t **data) {
  size_t size;

  if (is_last) {
    // The final tile has no length prefix; it spans the rest of the buffer.
    size = data_end - *data;
  } else {
    // Non-final tiles are prefixed by a 32-bit big-endian length.
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
          "Truncated packet or corrupt tile length");

    size = read_be32(*data);
    *data += 4;
  }
  return size;
}

853
854
855
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  vp9_reader residual_bc;

856
  VP9_COMMON *const cm = &pbi->common;
857
  MACROBLOCKD *const xd = &pbi->mb;
858

859
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
860
861
862
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;