vp9_decodframe.c 40.5 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9
10
 */

11
#include <assert.h>
John Koleszar's avatar
John Koleszar committed
12

13
14
15
16
#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

Dmitry Kovalev's avatar
Dmitry Kovalev committed
17
#include "vp9/common/vp9_alloccommon.h"
Ronald S. Bultje's avatar
Ronald S. Bultje committed
18
#include "vp9/common/vp9_common.h"
Yaowu Xu's avatar
Yaowu Xu committed
19
#include "vp9/common/vp9_entropy.h"
20
#include "vp9/common/vp9_entropymode.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
21
#include "vp9/common/vp9_extend.h"
22
#include "vp9/common/vp9_idct.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
23
#include "vp9/common/vp9_pred_common.h"
24
#include "vp9/common/vp9_quant_common.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
25
26
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
27
28
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
29
30

#include "vp9/decoder/vp9_dboolhuff.h"
31
32
33
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
34
#include "vp9/decoder/vp9_dsubexp.h"
35
36
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
37
#include "vp9/decoder/vp9_thread.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
38
#include "vp9/decoder/vp9_treereader.h"
39

40
41
// Read a 32-bit big-endian value from p.
// The bytes are assembled in an unsigned type so that a leading byte with
// its high bit set is never left-shifted into the sign bit of a signed
// int, which is undefined behavior.
static int read_be32(const uint8_t *p) {
  const uint32_t value = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                         ((uint32_t)p[2] << 8) | (uint32_t)p[3];
  return (int)value;
}

44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
// Compound prediction is only possible when at least one reference frame
// has a sign bias that differs from the first reference's.
static int is_compound_prediction_allowed(const VP9_COMMON *cm) {
  int ref;
  for (ref = 1; ref < ALLOWED_REFS_PER_FRAME; ++ref) {
    if (cm->ref_frame_sign_bias[ref + 1] != cm->ref_frame_sign_bias[1])
      return 1;
  }
  return 0;
}

// Select the fixed reference and the two variable references for compound
// prediction: the two references that share a sign bias vary, the
// remaining one is fixed.
static void setup_compound_prediction(VP9_COMMON *cm) {
  const int last_golden_same =
      cm->ref_frame_sign_bias[LAST_FRAME] ==
      cm->ref_frame_sign_bias[GOLDEN_FRAME];
  const int last_altref_same =
      cm->ref_frame_sign_bias[LAST_FRAME] ==
      cm->ref_frame_sign_bias[ALTREF_FRAME];

  if (last_golden_same) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (last_altref_same) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

71
// len == 0 is not allowed
// Check that [start, start + len) lies entirely inside [start, end).
// The length is compared against the remaining buffer size instead of
// forming start + len, because that addition can overflow the pointer for
// corrupt length values (undefined behavior the original expression
// depended on to detect wrap-around).
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && start < end && len <= (size_t)(end - start);
}

76
77
78
79
80
// Read a value coded in ceil(log2(max + 1)) bits and clamp it to
// [0, max].
static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  int value = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  if (value > max)
    value = max;
  return value;
}

81
82
83
84
85
// Parse the frame-level transform mode: a 2-bit literal, with one extra
// bit read only to distinguish ALLOW_32X32 from the mode above it.
static TX_MODE read_tx_mode(vp9_reader *r) {
  const int base = vp9_read_literal(r, 2);
  return (TX_MODE)(base == ALLOW_32X32 ? base + vp9_read_bit(r) : base);
}

88
// Update the conditional probabilities used to code per-block transform
// sizes. Each table carries one fewer probability than the number of
// selectable sizes at that level.
static void read_tx_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int ctx, sz;

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 3; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p8x8[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 2; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p16x16[ctx][sz]);

  for (ctx = 0; ctx < TX_SIZE_CONTEXTS; ++ctx)
    for (sz = 0; sz < TX_SIZES - 1; ++sz)
      vp9_diff_update_prob(r, &tx_probs->p32x32[ctx][sz]);
}

104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
// Update the probabilities for the switchable interpolation filter choice.
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int ctx, f;
  for (ctx = 0; ctx < SWITCHABLE_FILTERS + 1; ++ctx)
    for (f = 0; f < SWITCHABLE_FILTERS - 1; ++f)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[ctx][f]);
}

// Update the probabilities used to code inter prediction modes.
static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int ctx, m;
  for (ctx = 0; ctx < INTER_MODE_CONTEXTS; ++ctx)
    for (m = 0; m < INTER_MODES - 1; ++m)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[ctx][m]);
}

// Parse the compound prediction mode: one bit for single-only, a second
// bit (read only when the first is set) selects compound-only vs hybrid.
static INLINE COMPPREDMODE_TYPE read_comp_pred_mode(vp9_reader *r) {
  const COMPPREDMODE_TYPE mode = vp9_read_bit(r);
  return mode ? (COMPPREDMODE_TYPE)(mode + vp9_read_bit(r)) : mode;
}

// Parse the compound prediction configuration and the associated
// reference-frame probabilities for this frame.
static void read_comp_pred(VP9_COMMON *cm, vp9_reader *r) {
  int ctx;
  const int compound_allowed = is_compound_prediction_allowed(cm);

  // The mode is only present in the bitstream when compound prediction
  // is possible at all.
  cm->comp_pred_mode = compound_allowed ? read_comp_pred_mode(r)
                                        : SINGLE_PREDICTION_ONLY;
  if (compound_allowed)
    setup_compound_prediction(cm);

  // Hybrid mode codes a per-context single-vs-compound decision.
  if (cm->comp_pred_mode == HYBRID_PREDICTION) {
    for (ctx = 0; ctx < COMP_INTER_CONTEXTS; ++ctx)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[ctx]);
  }

  // Single-reference probabilities are present unless compound-only.
  if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
    for (ctx = 0; ctx < REF_CONTEXTS; ++ctx) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[ctx][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[ctx][1]);
    }
  }

  // Compound-reference probabilities are present unless single-only.
  if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY) {
    for (ctx = 0; ctx < REF_CONTEXTS; ++ctx)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[ctx]);
  }
}

// Conditionally update a motion-vector probability. Updated values are
// coded as 7 bits and mapped onto odd numbers via (v << 1) | 1.
static void update_mv(vp9_reader *r, vp9_prob *p) {
  if (vp9_read(r, NMV_UPDATE_PROB)) {
    const int coded = vp9_read_literal(r, 7);
    *p = (coded << 1) | 1;
  }
}

// Conditionally update the NMV (motion vector) probability tables.
// High-precision probabilities are only coded when allow_hp is set.
static void read_mv_probs(vp9_reader *r, nmv_context *mvc, int allow_hp) {
  int c, j, k;

  // Joint (row/col zero-ness) probabilities.
  for (j = 0; j < MV_JOINTS - 1; ++j)
    update_mv(r, &mvc->joints[j]);

  // Per-component sign, class, class-0 and integer offset bits.
  for (c = 0; c < 2; ++c) {
    nmv_component *const comp = &mvc->comps[c];

    update_mv(r, &comp->sign);
    for (j = 0; j < MV_CLASSES - 1; ++j)
      update_mv(r, &comp->classes[j]);
    for (j = 0; j < CLASS0_SIZE - 1; ++j)
      update_mv(r, &comp->class0[j]);
    for (j = 0; j < MV_OFFSET_BITS; ++j)
      update_mv(r, &comp->bits[j]);
  }

  // Per-component fractional-pel probabilities.
  for (c = 0; c < 2; ++c) {
    nmv_component *const comp = &mvc->comps[c];

    for (j = 0; j < CLASS0_SIZE; ++j)
      for (k = 0; k < 3; ++k)
        update_mv(r, &comp->class0_fp[j][k]);
    for (j = 0; j < 3; ++j)
      update_mv(r, &comp->fp[j]);
  }

  // Per-component high-precision (1/8-pel) probabilities.
  if (allow_hp) {
    for (c = 0; c < 2; ++c) {
      update_mv(r, &mvc->comps[c].class0_hp);
      update_mv(r, &mvc->comps[c].hp);
    }
  }
}

194
// Point each plane at the dequantization table for q_index: plane 0 uses
// the luma table, the remaining planes share the chroma table.
static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
  int plane;

  xd->plane[0].dequant = cm->y_dequant[q_index];
  for (plane = 1; plane < MAX_MB_PLANE; ++plane)
    xd->plane[plane].dequant = cm->uv_dequant[q_index];
}

202
203
204
205
206
// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int i, tile_col;

  // One mode-info stream per tile column; each points into the frame's mi
  // array at that column's starting offset (set via
  // vp9_get_tile_col_offsets).
  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams, tile_cols *
                              sizeof(*pbi->mi_streams)));
  for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
    vp9_get_tile_col_offsets(cm, tile_col);
    pbi->mi_streams[tile_col] =
        &cm->mi[cm->mi_rows * cm->cur_tile_mi_col_start];
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8.
  // NOTE(review): all planes share one allocation; the per-plane offset
  // below multiplies by sizeof(*pbi->above_context[0]), which only matches
  // the allocation size if that element type is 1 byte wide — confirm
  // against the above_context declaration.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (i = 1; i < MAX_MB_PLANE; ++i) {
    pbi->above_context[i] = pbi->above_context[0] +
                            i * sizeof(*pbi->above_context[0]) *
                            2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column bounds.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}

239
// Apply the inverse transform for one transform block and add the result
// into the destination plane, then zero the consumed quantized
// coefficients so the shared buffer is clean for the next block.
// `arg` is the MACROBLOCKD of the block being decoded. Blocks with
// eob == 0 carry no residual and are left untouched.
static void decode_block(int plane, int block, BLOCK_SIZE plane_bsize,
                         TX_SIZE tx_size, void *arg) {
  MACROBLOCKD* const xd = arg;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block);
  const int stride = pd->dst.stride;
  const int eob = pd->eobs[block];

  if (eob > 0) {
    TX_TYPE tx_type;
    const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
                                                         block);
    uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
                                                   pd->dst.buf, stride);
    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(pd->plane_type, xd, raster_block);
        if (tx_type == DCT_DCT)
          // itxm_add was selected in setup_quantization: idct4x4, or
          // iwht4x4 for lossless frames.
          xd->itxm_add(qcoeff, dst, stride, eob);
        else
          vp9_iht4x4_add(tx_type, qcoeff, dst, stride, eob);
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(pd->plane_type, xd);
        vp9_iht8x8_add(tx_type, qcoeff, dst, stride, eob);
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(pd->plane_type, xd);
        vp9_iht16x16_add(tx_type, qcoeff, dst, stride, eob);
        break;
      case TX_32X32:
        // 32x32 only supports DCT.
        tx_type = DCT_DCT;
        vp9_idct32x32_add(qcoeff, dst, stride, eob);
        break;
      default:
        assert(!"Invalid transform size");
    }

    // Clear only as many coefficients as could have been nonzero.
    if (eob == 1) {
      // Single coefficient: clear the first two int16 entries in one
      // 32-bit store.
      *((int32_t *)qcoeff) = 0;
    } else {
      // DCT with a small eob confines nonzero coefficients to the first
      // rows of the block; otherwise wipe the whole transform block.
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(qcoeff, 0, 4 * (4 << tx_size) * sizeof(qcoeff[0]));
      else
        vpx_memset(qcoeff, 0, (16 << (tx_size << 1)) * sizeof(qcoeff[0]));
    }
  }
}

287
// Intra-predict one transform block and, unless the block is coded as
// skipped, add its decoded residual. `arg` is the current MACROBLOCKD.
static void decode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                               TX_SIZE tx_size, void *arg) {
  MACROBLOCKD* const xd = arg;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi_8x8[0];
  const int raster_block = txfrm_block_to_raster_block(plane_bsize, tx_size,
                                                       block);
  uint8_t* const dst = raster_block_offset_uint8(plane_bsize, raster_block,
                                                 pd->dst.buf, pd->dst.stride);
  // Sub-8x8 luma blocks carry a prediction mode per 4x4 sub-block;
  // chroma always uses the block-level uv_mode.
  const MB_PREDICTION_MODE mode = (plane == 0)
        ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[raster_block].as_mode
                                          : mi->mbmi.mode)
        : mi->mbmi.uv_mode;

  // Pad reference samples for blocks extending past the frame edge.
  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
    extend_for_intra(xd, plane_bsize, plane, block, tx_size);

  vp9_predict_intra_block(xd, raster_block >> tx_size,
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride);

  if (!mi->mbmi.skip_coeff)
    decode_block(plane, block, plane_bsize, tx_size, arg);
}

312
313
// Decode the residual tokens for one block. Returns the total eob across
// the block's transform blocks, or -1 when the block is coded as skipped.
static int decode_tokens(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                         BLOCK_SIZE bsize, vp9_reader *r) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip_coeff) {
    // No residual coded: clear the skip contexts and signal "no tokens".
    reset_skip_context(xd, bsize);
    return -1;
  }

  // Segmentation may override the frame-level quantizer for this block.
  if (cm->seg.enabled)
    setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                cm->base_qindex));

  // TODO(dkovalev) if (!vp9_reader_has_error(r))
  return vp9_decode_tokens(cm, xd, &cm->seg, r, bsize);
}

329
330
// Prepare xd to decode the block of size bsize at (mi_row, mi_col): hook
// up the mode-info pointers (consuming one entry from the tile's mi
// stream), the skip/partition contexts, the frame-edge distances and the
// destination planes of the new frame buffer.
static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int offset = mi_row * cm->mode_info_stride + mi_col;

  xd->mode_info_stride = cm->mode_info_stride;

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;

  // we are using the mode info context stream here
  xd->mi_8x8[0] = xd->mi_stream;
  xd->mi_8x8[0]->mbmi.sb_type = bsize;
  ++xd->mi_stream;

  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);

  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
}
John Koleszar's avatar
John Koleszar committed
357

358
359
// Attach reference frame `idx` (0 or 1) of the current block to xd:
// validate its scale factors, set up the prediction source planes and
// propagate any corruption flag from the reference buffer.
static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
  const YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, ref);
  const struct scale_factors_common *const sfc =
      &cm->active_ref_scale_comm[ref];

  if (!vp9_is_valid_scale(sfc))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");

  xd->scale_factor[idx].sfc = sfc;
  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
  xd->corrupted |= cfg->corrupted;
}
John Koleszar's avatar
John Koleszar committed
372

373
374
// Decode one coding block: read its mode info and residual tokens, then
// reconstruct it via intra prediction or inter prediction plus residual.
// For sub-8x8 partitions only index 0 does any work — the whole 8x8 is
// decoded on the first call and later quadrant calls return immediately.
static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE bsize, int index) {
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi;
  int eobtotal;

  if (less8x8)
    if (index > 0)
      return;

  set_offsets(cm, xd, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, mi_row, mi_col, r);

  // Sub-8x8 blocks are reconstructed at 8x8 granularity.
  if (less8x8)
    bsize = BLOCK_8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mi_8x8[0]->mbmi;
  eobtotal = decode_tokens(cm, xd, bsize, r);

  if (!is_inter_block(mbmi)) {
    // Intra reconstruction
    foreach_transformed_block(xd, bsize, decode_block_intra, xd);
  } else {
    // Inter reconstruction
    const int decode_blocks = (eobtotal > 0);

    if (!less8x8) {
      assert(mbmi->sb_type == bsize);
      if (eobtotal == 0)
        mbmi->skip_coeff = 1;  // skip loopfilter
    }

    set_ref(cm, xd, 0, mi_row, mi_col);
    if (has_second_ref(mbmi))
      set_ref(cm, xd, 1, mi_row, mi_col);

    xd->subpix.filter_x = xd->subpix.filter_y =
        vp9_get_filter_kernel(mbmi->interp_filter);
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

    // The residual is only added when some transform block had
    // coefficients.
    if (decode_blocks)
      foreach_transformed_block(xd, bsize, decode_block, xd);
  }
  xd->corrupted |= vp9_reader_has_error(r);
}

421
422
// Recursively decode a superblock subtree: read the partition type for
// this node, decode the resulting sub-blocks (recursing on SPLIT), and
// update the partition contexts afterwards.
static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            int mi_row, int mi_col,
                            vp9_reader* r, BLOCK_SIZE bsize, int index) {
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;

  // Blocks starting outside the frame carry no data at all.
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  if (bsize < BLOCK_8X8) {
    // Sub-8x8: partitioning was decided at the 8x8 level; only the first
    // quadrant call does any work.
    if (index > 0)
      return;
  } else {
    int pl;
    // idx tells which partition choices remain valid near frame edges
    // (0 = all, 1/2 = only one of HORZ/VERT or SPLIT, <0 = forced).
    const int idx = check_bsize_coverage(hbs, cm->mi_rows, cm->mi_cols,
                                         mi_row, mi_col);
    pl = partition_plane_context(xd->above_seg_context, xd->left_seg_context,
                                 mi_row, mi_col, bsize);

    if (idx == 0)
      partition = treed_read(r, vp9_partition_tree,
                             cm->fc.partition_prob[cm->frame_type][pl]);
    else if (idx > 0 &&
        !vp9_read(r, cm->fc.partition_prob[cm->frame_type][pl][idx]))
      partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
    else
      partition = PARTITION_SPLIT;

    // Counts drive backward adaptation; skipped in frame-parallel mode.
    if (!cm->frame_parallel_decoding_mode)
      ++cm->counts.partition[pl][partition];
  }

  subsize = get_subsize(bsize, partition);

  switch (partition) {
    case PARTITION_NONE:
      decode_modes_b(cm, xd, mi_row, mi_col, r, subsize, 0);
      break;
    case PARTITION_HORZ:
      decode_modes_b(cm, xd, mi_row, mi_col, r, subsize, 0);
      // The second half is only decoded when it is inside the frame.
      if (mi_row + hbs < cm->mi_rows)
        decode_modes_b(cm, xd, mi_row + hbs, mi_col, r, subsize, 1);
      break;
    case PARTITION_VERT:
      decode_modes_b(cm, xd, mi_row, mi_col, r, subsize, 0);
      if (mi_col + hbs < cm->mi_cols)
        decode_modes_b(cm, xd, mi_row, mi_col + hbs, r, subsize, 1);
      break;
    case PARTITION_SPLIT: {
      int n;
      // Recurse into the four quadrants in raster order.
      for (n = 0; n < 4; n++) {
        const int j = n >> 1, i = n & 1;
        decode_modes_sb(cm, xd, mi_row + j * hbs, mi_col + i * hbs,
                        r, subsize, n);
      }
    } break;
    default:
      assert(!"Invalid partition type");
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}

489
490
491
492
static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
493
                                vp9_reader *r) {
Dmitry Kovalev's avatar
Dmitry Kovalev committed
494
495
496
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
497
  if (!read_is_valid(data, read_size, data_end))
498
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
499
                       "Truncated packet or corrupt tile length");
John Koleszar's avatar
John Koleszar committed
500

501
  if (vp9_reader_init(r, data, read_size))
502
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
John Koleszar's avatar
John Koleszar committed
503
                       "Failed to allocate bool decoder %d", 1);
John Koleszar's avatar
John Koleszar committed
504
505
}

506
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
507
                                   vp9_reader *r) {
508
509
510
511
512
513
514
515
516
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++)
            if (k > 0 || l < 3)
              for (m = 0; m < UNCONSTRAINED_NODES; m++)
517
                vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
518
}
519

520
static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
521
                            vp9_reader *r) {
522
  read_coef_probs_common(fc->coef_probs[TX_4X4], r);
523

524
  if (tx_mode > ONLY_4X4)
525
    read_coef_probs_common(fc->coef_probs[TX_8X8], r);
Dmitry Kovalev's avatar
Dmitry Kovalev committed
526

527
  if (tx_mode > ALLOW_8X8)
528
    read_coef_probs_common(fc->coef_probs[TX_16X16], r);
Dmitry Kovalev's avatar
Dmitry Kovalev committed
529

530
  if (tx_mode > ALLOW_16X16)
531
    read_coef_probs_common(fc->coef_probs[TX_32X32], r);
532
533
}

534
535
// Parse the segmentation portion of the uncompressed frame header:
// the enable flag, optional segmentation-map (tree/prediction)
// probabilities, and optional per-segment feature data.
static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    // Each tree probability is optional; absent entries default to
    // MAX_PROB.
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          // Values are clamped to the per-feature maximum; signed
          // features carry a trailing sign bit.
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

586
587
588
589
static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);
590
591
592

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
593
  lf->mode_ref_delta_update = 0;
594

595
596
597
598
  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
599
600
      int i;

601
602
      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
603
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
604

605
606
      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
607
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
608
609
610
611
    }
  }
}

612
613
// Read an optional delta-q value (absent means 0). Returns nonzero when
// the stored value changed, so the caller knows to rebuild dequant
// tables.
static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int previous = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return previous != *delta_q;
}
617

618
619
// Parse the quantization parameters (base q index plus three deltas),
// rebuild the dequant tables when anything changed, and pick the 4x4
// inverse transform (WHT for lossless, DCT otherwise).
static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  // Lossless coding is signalled by a zero base q index with no deltas.
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}

637
638
639
640
641
642
static INTERPOLATION_TYPE read_interp_filter_type(
                              struct vp9_read_bit_buffer *rb) {
  const INTERPOLATION_TYPE literal_to_type[] = { EIGHTTAP_SMOOTH,
                                                 EIGHTTAP,
                                                 EIGHTTAP_SHARP,
                                                 BILINEAR };
643
  return vp9_rb_read_bit(rb) ? SWITCHABLE
644
                             : literal_to_type[vp9_rb_read_literal(rb, 2)];
645
646
}

647
// Read a frame size: each dimension is coded minus one in 16 bits,
// width first.
static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  *width = vp9_rb_read_literal(rb, 16) + 1;
  *height = vp9_rb_read_literal(rb, 16) + 1;
}

655
// The display size defaults to the coded frame size and may be
// overridden by an explicit size in the bitstream.
static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}
661

662
663
// Resize the decoder to (width, height). Frame buffers are allocated at
// the first observed size; later frames may use a smaller size but never
// a larger one. The new-frame buffer is (re)sized on every call.
static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    if (!pbi->initial_width || !pbi->initial_height) {
      // First sized frame: allocate all frame buffers at this size.
      if (vp9_alloc_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
      pbi->initial_width = width;
      pbi->initial_height = height;
    } else {
      // Buffers already exist: reject any dimension that exceeds them.
      if (width > pbi->initial_width)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame width too large");

      if (height > pbi->initial_height)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame height too large");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  vp9_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                           cm->subsampling_x, cm->subsampling_y,
                           VP9BORDERINPIXELS);
}

693
694
695
// Parse an explicitly coded frame size, apply it, then parse the optional
// display size.
static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;

  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

701
702
703
704
705
706
707
708
// Determine the frame size for an inter frame: one flag per reference
// lets the frame reuse that reference's dimensions (first match wins);
// otherwise the size is coded explicitly.
static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  int width = 0, height = 0;
  int found = 0;
  int i;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const cfg = get_frame_ref_buffer(cm, i);
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  // A reference with zero dimensions means the stream is corrupt.
  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}

728
729
// Point xd at the per-tile-column mode-info stream and the shared
// frame-wide above contexts for decoding one tile column.
static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_col) {
  int plane;

  xd->mi_stream = pbi->mi_streams[tile_col];

  for (plane = 0; plane < MAX_MB_PLANE; ++plane)
    xd->above_context[plane] = pbi->above_context[plane];

  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}

// Decode every superblock in the current tile. When inline loop
// filtering is enabled, filtering trails decoding by one superblock row,
// running either on a worker thread (num_threads > 1) or synchronously.
static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    // Prime the loop-filter worker with this frame's state.
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      decode_modes_sb(cm, xd, mi_row, mi_col, r, BLOCK_64X64, 0);

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // delay the loopfilter by 1 macroblock row.
      if (lf_start < 0) continue;

      // decoding has completed: finish up the loop filter in this thread.
      if (mi_row + MI_BLOCK_SIZE >= cm->cur_tile_mi_row_end) continue;

      // Wait for any in-flight filtering, then hand the worker the next
      // row span [lf_start, mi_row).
      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    // Filter whatever rows remain, synchronously, before returning.
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}

796
// Parse the tile configuration: log2 of the tile column count is
// unary-coded above the frame-dependent minimum; log2 of the row count
// is 0, 1 or 2 in at most two bits.
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  cm->log2_tile_cols = min_log2_tile_cols;
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

812
813
814
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  vp9_reader residual_bc;

815
  VP9_COMMON *const cm = &pbi->common;
816
  MACROBLOCKD *const xd = &pbi->mb;
817

818
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
819
820
821
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
822
  int tile_row, tile_col;
823

824
825
  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
826
827
828
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
829

830
831
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);
832
833
834

  if (pbi->oxcf.inv_tile_order) {
    const uint8_t *data_ptr2[4][1 << 6];
835
    vp9_reader bc_bak = {0};
836
837

    // pre-initialize the offsets, we're going to read in inverse order
838
    data_ptr2[0][0] = data;
Dmitry Kovalev's avatar
Dmitry Kovalev committed
839
    for (tile_row = 0; tile_row < tile_rows; tile_row++) {