vp9_encodeframe.c 146 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9
10
 */

Dmitry Kovalev's avatar
Dmitry Kovalev committed
11
12
13
14
#include <limits.h>
#include <math.h>
#include <stdio.h>

Jim Bankoski's avatar
Jim Bankoski committed
15
#include "./vp9_rtcd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
16
17
18
19
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

20
#include "vp9/common/vp9_common.h"
Yaowu Xu's avatar
Yaowu Xu committed
21
#include "vp9/common/vp9_entropy.h"
22
#include "vp9/common/vp9_entropymode.h"
23
#include "vp9/common/vp9_idct.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
24
25
26
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
27
#include "vp9/common/vp9_reconintra.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
28
#include "vp9/common/vp9_reconinter.h"
29
#include "vp9/common/vp9_seg_common.h"
30
#include "vp9/common/vp9_systemdependent.h"
31
#include "vp9/common/vp9_tile_common.h"
32

33
#include "vp9/encoder/vp9_aq_complexity.h"
Marco Paniconi's avatar
Marco Paniconi committed
34
35
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
36
37
38
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
39
#include "vp9/encoder/vp9_ethread.h"
40
#include "vp9/encoder/vp9_extend.h"
41
#include "vp9/encoder/vp9_pickmode.h"
42
#include "vp9/encoder/vp9_rd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
43
44
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
45
#include "vp9/encoder/vp9_tokenize.h"
46

47
48
49
50
51
52
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST        0
#define SPLIT_MV_ZBIN_BOOST  0
#define INTRA_ZBIN_BOOST     0

53
54
static void encode_superblock(VP9_COMP *cpi, ThreadData * td,
                              TOKENEXTRA **t, int output_enabled,
55
56
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
57

58
59
60
61
// This is used as a reference when computing the source variance for the
//  purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
//  which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth counterparts of VP9_VAR_OFFS: flat mid-gray reference
// blocks used when computing source variance. The 10- and 12-bit tables
// scale 128 by 4 and 16 respectively so the mid-gray level matches the
// wider sample range.
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

// 10-bit variant: 128 << 2.
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

// 12-bit variant: 128 << 4.
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

108
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
109
                                              const struct buf_2d *ref,
110
                                              BLOCK_SIZE bs) {
111
112
113
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
114
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
115
116
}

117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth version of get_sby_perpixel_variance(): selects the flat
// reference table matching the bit depth `bd` (8/10/12) and returns the
// per-pixel variance of the source block `ref` of size `bs`.
static unsigned int high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      // Unknown depths fall back to the 8-bit table.
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  // Normalize the block variance to a per-pixel figure.
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

143
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
144
145
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
146
                                                   BLOCK_SIZE bs) {
147
148
149
150
151
152
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
  const uint8_t* last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride +
                                              mi_col * MI_SIZE];
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              last_y, last->y_stride, &sse);
153
154
155
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

156
// Maps the 64x64 source-vs-last-frame variance to a fixed partition size
// for the RD path: lower variance selects a larger block.
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col) {
  const unsigned int var =
      get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row, mi_col,
                                     BLOCK_64X64);
  // Thresholds are ascending; the first match wins.
  if (var < 8)
    return BLOCK_64X64;
  if (var < 128)
    return BLOCK_32X32;
  if (var < 2048)
    return BLOCK_16X16;
  return BLOCK_8X8;
}

172
static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
173
                                                      MACROBLOCK *x,
174
175
                                                      int mi_row,
                                                      int mi_col) {
176
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
177
178
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
179
  if (var < 4)
180
    return BLOCK_64X64;
181
  else if (var < 10)
182
183
    return BLOCK_32X32;
  else
184
    return BLOCK_16X16;
185
186
}

187
188
// Lighter version of set_offsets that only sets the mode info
// pointers.
Jingning Han's avatar
Jingning Han committed
189
190
191
192
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
193
  const int idx_str = xd->mi_stride * mi_row + mi_col;
hkuang's avatar
hkuang committed
194
195
  xd->mi = cm->mi + idx_str;
  xd->mi[0].src_mi = &xd->mi[0];
196
197
198
}

static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
199
200
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
201
202
203
204
205
206
207
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

208
  set_skip_context(xd, mi_row, mi_col);
209

Jingning Han's avatar
Jingning Han committed
210
  set_mode_info_offsets(cm, xd, mi_row, mi_col);
211

hkuang's avatar
hkuang committed
212
  mbmi = &xd->mi[0].src_mi->mbmi;
213
214

  // Set up destination pointers.
215
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232

  // Set up limit values for MV components.
  // Mv beyond the range do not produce new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
Dmitry Kovalev's avatar
Dmitry Kovalev committed
233
234
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251

  // Setup segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

252
253
254
// Points every in-frame mode-info entry covered by `bsize` at the top-left
// entry, so the whole block shares one mode decision.
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int row, col;
  for (row = 0; row < block_height; ++row) {
    for (col = 0; col < block_width; ++col) {
      // Skip entries that fall outside the visible frame.
      if (mi_row + row < cm->mi_rows && mi_col + col < cm->mi_cols)
        xd->mi[row * xd->mi_stride + col].src_mi = &xd->mi[0];
    }
  }
}

static void set_block_size(VP9_COMP * const cpi,
266
                           MACROBLOCKD *const xd,
267
268
269
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
Jingning Han's avatar
Jingning Han committed
270
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
hkuang's avatar
hkuang committed
271
    xd->mi[0].src_mi->mbmi.sb_type = bsize;
272
273
274
275
276
277
  }
}

// One variance accumulator: running sums plus the variance computed from
// them by get_variance().
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;  // log2 of the sample count the sums cover
  int variance;
} var;

// Variances for the unsplit block and its two-way horizontal/vertical
// partitions.
typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

// Recursive variance tree, one level per block size from 4x4 up to 64x64.
typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

// Size-erased view of one tree level, produced by tree_to_node().
typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

// Builds a size-erased view of one level of the variance tree: points
// `node` at the level's partition variances and at the "none" variance of
// each of its four children (for BLOCK_4X4 the children are the raw
// 4-entry var array).
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      // Leaf level: the children are plain var entries, not subtrees.
      v4x4 *vt = (v4x4 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);  // Only square sizes 4x4..64x64 have tree levels.
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

// Computes the scaled variance from the accumulated sums:
//   variance = 256 * (sum_sq - mean * sum) / count, count = 2^log2_count.
static void get_variance(var *v) {
  // Keep the intermediate in 64 bits and apply the final shift BEFORE
  // narrowing to int. The previous form, (int)(256 * (...)) >> log2_count,
  // cast the 64-bit product to int prior to the shift (a cast binds
  // tighter than >>), which can overflow/truncate for large accumulated
  // sums on big blocks.
  v->variance = (int)((256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count))) >> v->log2_count);
}

// Merges two equal-sized variance accumulators into `r`; the sample count
// doubles, so log2_count increases by one.
void sum_2_variances(const var *a, const var *b, var *r) {
  // Both inputs must cover the same number of samples.
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}

// Fills one level of the variance tree from its four children: combines
// child sums pairwise into the horizontal and vertical split variances,
// then combines those into the whole-block ("none") variance.
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  // Children are indexed 0..3 in raster order: 0|1 over 2|3.
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

static int set_vt_partitioning(VP9_COMP *cpi,
400
                               MACROBLOCKD *const xd,
401
402
403
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
Yaowu Xu's avatar
Yaowu Xu committed
404
                               int mi_col) {
405
406
407
408
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
409
410
  // TODO(marpan): Adjust/tune these thresholds.
  const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4;
411
412
413
  int64_t threshold =
      (int64_t)(threshold_multiplier *
                vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth));
414
415
416
417
  int64_t threshold_bsize_ref = threshold << 6;
  int64_t threshold_low = threshold;
  BLOCK_SIZE bsize_ref = BLOCK_16X16;

418
419
420
  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

421
422
  if (cm->frame_type == KEY_FRAME) {
    bsize_ref = BLOCK_8X8;
423
424
425
    // Choose lower thresholds for key frame variance to favor split, but keep
    // threshold for splitting to 4x4 block still fairly high for now.
    threshold_bsize_ref = threshold << 2;
426
    threshold_low = threshold >> 2;
427
428
  }

429
430
431
432
  // For bsize=bsize_ref (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_ref) {
433
    get_variance(&vt.part_variances->none);
434
435
436
437
438
439
440
441
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold_bsize_ref) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_ref) {
442
    get_variance(&vt.part_variances->none);
443
444
445
446
447
448
449
450
451
452
453
454
455
    // For key frame, for bsize above 32X32, or very high variance, take split.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
        vt.part_variances->none.variance > (threshold << 2))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold_low) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
456

457
    // Check vertical split.
458
459
460
461
462
463
464
465
466
467
    if (mi_row + block_height / 2 < cm->mi_rows) {
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold_low &&
          vt.part_variances->vert[1].variance < threshold_low) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
468
    }
469
    // Check horizontal split.
470
471
472
473
474
475
476
477
478
479
    if (mi_col + block_width / 2 < cm->mi_cols) {
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold_low &&
          vt.part_variances->horz[1].variance < threshold_low) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
480
    }
481

482
    return 0;
483
484
485
486
  }
  return 0;
}

487
488
489
// This function chooses partitioning based on the variance between source and
// reconstructed last, where variance is computed for downsampled inputs.
// Currently 8x8 downsampling is used for delta frames, 4x4 for key frames.
490
491
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
492
                                MACROBLOCK *x,
493
494
                                int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
495
  MACROBLOCKD *xd = &x->e_mbd;
496

497
  int i, j, k, m;
498
499
500
501
502
503
504
505
506
  v64x64 vt;
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
  const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;

507
  vp9_clear_system_state();
508
  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
509
510
511
512
513
514
515
516
517
518
519
520

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (cm->frame_type != KEY_FRAME) {
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);

hkuang's avatar
hkuang committed
521
522
    xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64;
523
    xd->mi[0].src_mi->mbmi.mv[0].as_int = 0;
524
525
526
527
528
529
530
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
547
548
549
550
551
552
553
554
555
556
557
  }

  // Fill in the entire tree of 8x8 variances for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      for (k = 0; k < 4; k++) {
558
559
560
561
562
563
564
        int x8_idx = x16_idx + ((k & 1) << 3);
        int y8_idx = y16_idx + ((k >> 1) << 3);
        if (cm->frame_type != KEY_FRAME) {
          unsigned int sse = 0;
          int sum = 0;
          if (x8_idx < pixels_wide && y8_idx < pixels_high) {
            int s_avg, d_avg;
565
#if CONFIG_VP9_HIGHBITDEPTH
566
567
568
569
570
571
572
            if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
              s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
              d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
            } else {
              s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
              d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
           }
573
#else
574
575
            s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
            d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
576
#endif
577
578
579
580
581
582
            sum = s_avg - d_avg;
            sse = sum * sum;
          }
          // If variance is based on 8x8 downsampling, we stop here and have
          // one sample for 8x8 block (so use 1 for count in fill_variance),
          // which of course means variance = 0 for 8x8 block.
583
          fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
584
585
586
587
588
589
590
591
592
        } else {
          // For key frame, go down to 4x4.
          v8x8 *vst2 = &vst->split[k];
          for (m = 0; m < 4; m++) {
            int x4_idx = x8_idx + ((m & 1) << 2);
            int y4_idx = y8_idx + ((m >> 1) << 2);
            unsigned int sse = 0;
            int sum = 0;
            if (x4_idx < pixels_wide && y4_idx < pixels_high) {
593
594
595
596
597
598
599
600
#if CONFIG_VP9_HIGHBITDEPTH
              int s_avg;
              if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
                s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
              } else {
                s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
              }
#else
601
              int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
602
#endif
603
604
605
606
607
608
609
              // For key frame, reference is set to 128.
              sum = s_avg - 128;
              sse = sum * sum;
            }
            // If variance is based on 4x4 downsampling, we stop here and have
            // one sample for 4x4 block (so use 1 for count in fill_variance),
            // which of course means variance = 0 for 4x4 block.
610
           fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
611
          }
612
        }
613
614
615
616
617
618
      }
    }
  }
  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
619
620
621
622
623
      if (cm->frame_type == KEY_FRAME) {
        for (m = 0; m < 4; m++) {
          fill_variance_tree(&vt.split[i].split[j].split[m], BLOCK_8X8);
        }
      }
624
625
626
627
628
629
630
      fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  }
  fill_variance_tree(&vt, BLOCK_64X64);

  // Now go through the entire structure,  splitting every block size until
631
  // we get to one that's got a variance lower than our threshold.
632
  if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
633
      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col)) {
634
635
636
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
637
      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
Yaowu Xu's avatar
Yaowu Xu committed
638
                               (mi_row + y32_idx), (mi_col + x32_idx))) {
639
640
641
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
642
643
644
645
646
647
648
          // Note: If 8x8 downsampling is used for variance calculation we
          // cannot really select block size 8x8 (or even 8x16/16x8), since we
          // don't have sufficient samples for variance. So on delta frames,
          // 8x8 partition is only set if variance of the 16x16 block is very
          // high. For key frames, 4x4 downsampling is used, so we can better
          // select 8x16/16x8 and 8x8. 4x4 partition can potentially be set
          // used here too, but for now 4x4 is not allowed.
649
          if (!set_vt_partitioning(cpi, xd, &vt.split[i].split[j],
650
                                   BLOCK_16X16,
651
652
                                   mi_row + y32_idx + y16_idx,
                                   mi_col + x32_idx + x16_idx)) {
653
654
655
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
              if (cm->frame_type == KEY_FRAME) {
                if (!set_vt_partitioning(cpi, xd,
                                         &vt.split[i].split[j].split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx)) {
                    set_block_size(cpi, xd,
                                  (mi_row + y32_idx + y16_idx + y8_idx),
                                  (mi_col + x32_idx + x16_idx + x8_idx),
                                   BLOCK_4X4);
                }
              } else {
                set_block_size(cpi, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
672
               }
673
674
675
676
677
678
679
680
            }
          }
        }
      }
    }
  }
}

681
682
static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
683
684
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
Ronald S. Bultje's avatar
Ronald S. Bultje committed
685
  int i, x_idx, y;
686
  VP9_COMMON *const cm = &cpi->common;
687
688
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
689
  MACROBLOCKD *const xd = &x->e_mbd;
690
691
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
John Koleszar's avatar
John Koleszar committed
692
  MODE_INFO *mi = &ctx->mic;
hkuang's avatar
hkuang committed
693
694
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  MODE_INFO *mi_addr = &xd->mi[0];
695
  const struct segmentation *const seg = &cm->seg;
696
697
698
699
700
701
702
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;
703

704
  const int mis = cm->mi_stride;
Jim Bankoski's avatar
Jim Bankoski committed
705
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
Jim Bankoski's avatar
Jim Bankoski committed
706
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
707
  int max_plane;
Adrian Grange's avatar
Adrian Grange committed
708

709
  assert(mi->mbmi.sb_type == bsize);
710

711
  *mi_addr = *mi;
hkuang's avatar
hkuang committed
712
  mi_addr->src_mi = mi_addr;
713

Paul Wilkins's avatar
Paul Wilkins committed
714
  // If segmentation in use
715
  if (seg->enabled) {
Paul Wilkins's avatar
Paul Wilkins committed
716
717
718
719
720
721
722
723
724
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
        vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
725
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
hkuang's avatar
hkuang committed
726
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi,
727
                                        mi_row, mi_col, bsize, 1, ctx->rate);
Paul Wilkins's avatar
Paul Wilkins committed
728
    }
729
  }
730

731
732
  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
733
    p[i].coeff = ctx->coeff_pbuf[i][1];
734
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
735
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
736
    p[i].eobs = ctx->eobs_pbuf[i][1];
737
738
  }

739
740
  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
741
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
742
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
743
    p[i].eobs = ctx->eobs_pbuf[i][2];
744
745
  }

John Koleszar's avatar
John Koleszar committed
746
747
  // Restore the coding context of the MB to that that was in place
  // when the mode was picked for it
748
749
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
James Zern's avatar
James Zern committed
750
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
751
        && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
hkuang's avatar
hkuang committed
752
        xd->mi[x_idx + y * mis].src_mi = mi_addr;
753
      }
754

755
  if (cpi->oxcf.aq_mode)
Dmitry Kovalev's avatar
Dmitry Kovalev committed
756
    vp9_init_plane_quantizers(cpi, x);
757

758
759
  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
760
761
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
762
763
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
Ronald S. Bultje's avatar
Ronald S. Bultje committed
764
  }
Adrian Grange's avatar
Adrian Grange committed
765

766
  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
767
768
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
John Koleszar's avatar
John Koleszar committed
769
770
  }

771
  x->skip = ctx->skip;
772
  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
773
             sizeof(uint8_t) * ctx->num_4x4_blk);
774

Ronald S. Bultje's avatar
Ronald S. Bultje committed
775
776
777
  if (!output_enabled)
    return;

778
  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
779
    for (i = 0; i < TX_MODES; i++)
780
      rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
781
782
  }

783
#if CONFIG_INTERNAL_STATS
784
  if (frame_is_intra_only(cm)) {
John Koleszar's avatar
John Koleszar committed
785
    static const int kf_mode_index[] = {
786
787
788
789
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
John Koleszar's avatar
John Koleszar committed
790
791
792
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
Dmitry Kovalev's avatar
Dmitry Kovalev committed
793
      THR_D207_PRED /*D207_PRED*/,
794
795
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
John Koleszar's avatar
John Koleszar committed
796
    };
797
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
John Koleszar's avatar
John Koleszar committed
798
799
  } else {
    // Note how often each mode chosen as best
800
801
802
803
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
  if (!frame_is_intra_only(cm)) {
804
    if (is_inter_block(mbmi)) {
805
      vp9_update_mv_count(td);
806
807
808

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
809
        ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
810
      }
811
    }
Adrian Grange's avatar
Adrian Grange committed
812

813
814
815
    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
816

817
    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
818
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
John Koleszar's avatar
John Koleszar committed
819
  }
820
821
822
823
824
825
826
827
828
829
830

  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
      mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
      mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
      mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
    }
  }
Adrian Grange's avatar
Adrian Grange committed
831
832
}

Jim Bankoski's avatar
Jim Bankoski committed
833
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  // Per-plane source pixel pointers and row strides (Y, U, V); the two
  // chroma planes share a stride.
  uint8_t *const plane_bufs[3] = { src->y_buffer, src->u_buffer,
                                   src->v_buffer };
  const int plane_strides[3] = { src->y_stride, src->uv_stride,
                                 src->uv_stride };
  MACROBLOCKD *const xd = &x->e_mbd;
  int plane;

  // Remember which frame buffer the source pixels come from.
  xd->cur_buf = src;

  // Point each plane's src descriptor at the pixels covering (mi_row, mi_col).
  for (plane = 0; plane < MAX_MB_PLANE; plane++) {
    setup_pred_plane(&x->plane[plane].src, plane_bufs[plane],
                     plane_strides[plane], mi_row, mi_col, NULL,
                     xd->plane[plane].subsampling_x,
                     xd->plane[plane].subsampling_y);
  }
}

848
849
// Populate the mode info for a block whose segment forces SKIP: a skipped
// ZEROMV block referencing LAST_FRAME with a zero motion vector, and reset
// rd_cost accordingly.
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  // Borrow the interpolation filter from the above neighbor when available,
  // otherwise from the left neighbor; fall back to EIGHTTAP.
  INTERP_FILTER filter_ref = EIGHTTAP;

  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;

  mbmi->sb_type = bsize;
  mbmi->mode = ZEROMV;
  mbmi->skip = 1;
  mbmi->uv_mode = DC_PRED;
  mbmi->interp_filter = filter_ref;
  // Largest transform permitted by both the block size and the frame's
  // transform mode.
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[tx_mode]);
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;

  xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}

878
879
880
881
882
883
884
885
886
887
888
889
// Return the rate-distortion multiplier for the given segment, after
// re-initializing the plane quantizers so they match the segment's
// quantization parameters.
static int set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x,
                              int8_t segment_id) {
  VP9_COMMON *const cm = &cpi->common;
  int qindex;

  vp9_init_plane_quantizers(cpi, x);
  vp9_clear_system_state();
  qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
  return vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
}

890
// Run rate-distortion mode selection for one block of size `bsize` at
// (mi_row, mi_col), writing the chosen rate/distortion into rd_cost and the
// coding decision into ctx. `best_rd` is the threshold beyond which the
// search may give up early. On failure, rd_cost->rate is INT_MAX.
static void rd_pick_sb_modes(VP9_COMP *cpi,
                             TileDataEnc *tile_data,
                             MACROBLOCK *const x,
                             int mi_row, int mi_col, RD_COST *rd_cost,
                             BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                             int64_t best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  int i, orig_rdmult;

  vp9_clear_system_state();

  // Use the lower precision, but faster, 32x32 fdct for mode selection.
  x->use_lp32x32fdct = 1;

  // NOTE: mbmi must be read only after set_offsets(), which positions
  // xd->mi for this block.
  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0].src_mi->mbmi;
  mbmi->sb_type = bsize;

  // Route coefficient buffers to the ctx's scratch set 0 for the search.
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }
  ctx->is_coded = 0;
  ctx->skippable = 0;
  ctx->pred_pixel_ready = 0;
  x->skip_recode = 0;

  // Set to zero to make sure we do not use the previous encoded frame stats
  mbmi->skip = 0;

  // Per-pixel source variance feeds the AQ and mode-pruning heuristics;
  // high-bitdepth frames use the bd-aware variant.
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    x->source_variance =
        high_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize, xd->bd);
  } else {
    x->source_variance =
        get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  }
#else
  x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  // Save rdmult before it might be changed, so it can be restored later.
  orig_rdmult = x->rdmult;

  if (aq_mode == VARIANCE_AQ) {
    const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
                                            : vp9_block_energy(cpi, x, bsize);
    // On key frames / alt-ref updates the segment id is (re)derived from the
    // block energy; otherwise it is read back from the segmentation map.
    if (cm->frame_type == KEY_FRAME ||
        cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
      mbmi->segment_id = vp9_vaq_segment_id(energy);
    } else {
      const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                    : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  } else if (aq_mode == COMPLEXITY_AQ) {
    x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  } else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
    // If segment 1, use rdmult for that segment.
    if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }

  // Find best coding mode & reconstruct the MB so it is available
  // as a predictor for MBs that follow in the SB
  if (frame_is_intra_only(cm)) {
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
  } else {
    if (bsize >= BLOCK_8X8) {
      if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
        vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
                                           ctx, best_rd);
      else
        vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
                                  rd_cost, bsize, ctx, best_rd);
    } else {
      vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
                                    rd_cost, bsize, ctx, best_rd);
    }
  }

  // Examine the resulting rate and for AQ mode 2 make a segment choice.
  if ((rd_cost->rate != INT_MAX) &&
      (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
      (cm->frame_type == KEY_FRAME ||
       cpi->refresh_alt_ref_frame ||
       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
    vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
  }

  x->rdmult = orig_rdmult;

  // TODO(jingning) The rate-distortion optimization flow needs to be
  // refactored to provide proper exit/return handle.
  if (rd_cost->rate == INT_MAX)
    rd_cost->rdcost = INT64_MAX;

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}
Adrian Grange's avatar
Adrian Grange committed
1004

1005
1006
static void update_stats(VP9_COMMON *cm, ThreadData *td) {
  const MACROBLOCK *x = &td->mb;
1007
  const MACROBLOCKD *const xd = &x->e_mbd;
hkuang's avatar
hkuang committed
1008
  const MODE_INFO *const mi = xd->mi[0].src_mi;
1009
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
1010
  const BLOCK_SIZE bsize = mbmi->sb_type;
Adrian Grange's avatar
Adrian Grange committed
1011

1012
  if (!frame_is_intra_only(cm)) {
1013
1014
    FRAME_COUNTS *const counts = td->counts;
    const int inter_block = is_inter_block(mbmi);
1015
    const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
1016
                                                     SEG_LVL_REF_FRAME);
1017
1018
1019
1020
1021