/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

Dmitry Kovalev's avatar
Dmitry Kovalev committed
11
12
13
14
#include <limits.h>
#include <math.h>
#include <stdio.h>

Jim Bankoski's avatar
Jim Bankoski committed
15
#include "./vp9_rtcd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
16
17
18
19
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

20
#include "vp9/common/vp9_common.h"
Yaowu Xu's avatar
Yaowu Xu committed
21
#include "vp9/common/vp9_entropy.h"
22
#include "vp9/common/vp9_entropymode.h"
23
#include "vp9/common/vp9_idct.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
24
25
26
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
27
#include "vp9/common/vp9_reconintra.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
28
#include "vp9/common/vp9_reconinter.h"
29
#include "vp9/common/vp9_seg_common.h"
30
#include "vp9/common/vp9_systemdependent.h"
31
#include "vp9/common/vp9_tile_common.h"
32

33
#include "vp9/encoder/vp9_aq_complexity.h"
Marco Paniconi's avatar
Marco Paniconi committed
34
35
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
36
37
38
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
39
#include "vp9/encoder/vp9_ethread.h"
40
#include "vp9/encoder/vp9_extend.h"
41
#include "vp9/encoder/vp9_pickmode.h"
42
#include "vp9/encoder/vp9_rd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
43
44
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
45
#include "vp9/encoder/vp9_tokenize.h"
46

47
48
49
50
51
52
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST        0
#define SPLIT_MV_ZBIN_BOOST  0
#define INTRA_ZBIN_BOOST     0

53
54
static void encode_superblock(VP9_COMP *cpi, ThreadData * td,
                              TOKENEXTRA **t, int output_enabled,
55
56
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
57

58
59
60
61
// This is used as a reference when computing the source variance for the
//  purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
//  which will be faster.
// An 8x8 block of the neutral pixel value 128 ("flat gray"): passing it as
// the second operand of a variance function makes that function measure the
// plain variance of the source block.
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth versions of the flat reference block above.  The neutral
// value is scaled with the bit depth: 128 for 8-bit, 128*4 for 10-bit and
// 128*16 for 12-bit (i.e. 128 << (bd - 8)).
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

108
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
109
                                              const struct buf_2d *ref,
110
                                              BLOCK_SIZE bs) {
111
112
113
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
114
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
115
116
}

117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth counterpart of get_sby_perpixel_variance(): selects the
// flat reference table that matches the coding bit depth |bd|.
static unsigned int high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  const uint16_t *offs;
  unsigned int var, sse;
  if (bd == 10)
    offs = VP9_HIGH_VAR_OFFS_10;
  else if (bd == 12)
    offs = VP9_HIGH_VAR_OFFS_12;
  else
    offs = VP9_HIGH_VAR_OFFS_8;  // 8-bit, and fallback for unexpected depths.
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, CONVERT_TO_BYTEPTR(offs),
                           0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

143
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
144
145
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
146
                                                   BLOCK_SIZE bs) {
147
148
  unsigned int sse, var;
  uint8_t *last_y;
149
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
150
151
152
153
154

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
155
156
157
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

158
// Choose a fixed partition size for RD mode from the source/last-frame
// difference variance of the 64x64 block: the larger the difference, the
// finer the partition.
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col) {
  const unsigned int var =
      get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row, mi_col,
                                     BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  if (var < 128)
    return BLOCK_32X32;
  if (var < 2048)
    return BLOCK_16X16;
  return BLOCK_8X8;
}

174
static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
175
                                                      MACROBLOCK *x,
176
177
                                                      int mi_row,
                                                      int mi_col) {
178
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
179
180
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
181
  if (var < 4)
182
    return BLOCK_64X64;
183
  else if (var < 10)
184
185
    return BLOCK_32X32;
  else
186
    return BLOCK_16X16;
187
188
}

189
190
// Lighter version of set_offsets that only sets the mode info
// pointers.
Jingning Han's avatar
Jingning Han committed
191
192
193
194
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
195
  const int idx_str = xd->mi_stride * mi_row + mi_col;
hkuang's avatar
hkuang committed
196
197
  xd->mi = cm->mi + idx_str;
  xd->mi[0].src_mi = &xd->mi[0];
198
199
200
}

// Prepare the per-block encoding context for the block of size |bsize| at
// (mi_row, mi_col): skip context, mode-info pointers, destination and source
// plane pointers, motion-vector search limits, distance-to-edge values, R/D
// multipliers and the segment ID / encode-breakout for the block.
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0].src_mi->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // Mv beyond the range do not produce new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  // Blocks must be aligned to their own size within the mi grid.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    // Variance AQ assigns segment IDs elsewhere; only read the map here for
    // the other AQ modes.
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

254
255
256
// Make every in-frame mode-info entry covered by the block of size |bsize|
// at (mi_row, mi_col) point back at the top-left entry's mode info.
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  int r, c;
  for (r = 0; r < bh; ++r) {
    for (c = 0; c < bw; ++c) {
      // Skip entries that fall outside the coded frame.
      const int inside =
          (mi_row + r < cm->mi_rows) && (mi_col + c < cm->mi_cols);
      if (inside)
        xd->mi[r * xd->mi_stride + c].src_mi = &xd->mi[0];
    }
  }
}

static void set_block_size(VP9_COMP * const cpi,
268
                           MACROBLOCKD *const xd,
269
270
271
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
Jingning Han's avatar
Jingning Han committed
272
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
hkuang's avatar
hkuang committed
273
    xd->mi[0].src_mi->mbmi.sb_type = bsize;
274
275
276
277
278
279
  }
}

// Accumulated statistics for one node of the variance tree.
typedef struct {
  int64_t sum_square_error;  // Sum of squared sample differences.
  int64_t sum_error;         // Sum of sample differences.
  int log2_count;            // log2 of the number of accumulated samples.
  int variance;              // Filled in by get_variance(); scaled by 256.
} var;

// Statistics for the none / horizontal / vertical partitions of a block.
typedef struct {
  var none;     // Whole block.
  var horz[2];  // Top and bottom halves.
  var vert[2];  // Left and right halves.
} partition_variance;

// The v*x* structs below form a quadtree of variance statistics, from 4x4
// leaves up to the 64x64 superblock root; each level holds its own
// partition_variance plus four children of the next smaller size.
typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

// Type-erased view of one tree level, filled by tree_to_node().
typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

// Labels for the upper levels of the variance tree.
// NOTE(review): no use of these constants is visible in this chunk — confirm.
typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

// Fill |node| with pointers into the variance-tree level |data|, whose
// concrete type is selected by |bsize|.  For BLOCK_4X4 the children are the
// leaf var entries themselves; for every other size they are the "none"
// statistics of the four sub-blocks.
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int k;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *const vt = (v64x64 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *const vt = (v32x32 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *const vt = (v16x16 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *const vt = (v8x8 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *const vt = (v4x4 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k];
      break;
    }
    default:
      assert(0);  // Only square sizes 4x4..64x64 carry a variance tree.
      break;
  }
}

// Set variance values given sum square error, sum error, count.
// Stores the raw accumulators for one node: |s2| is the sum of squared
// errors, |s| the sum of errors, and |c| the log2 of the sample count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->log2_count = c;
  v->sum_error = s;
  v->sum_square_error = s2;
}

// Derive the per-sample variance (scaled by 256) from the accumulated sums:
//   variance = 256 * (sum_sq - mean * sum) / count,
// with divisions done as shifts since the count is a power of two.
static void get_variance(var *v) {
  const int64_t diff =
      v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count);
  v->variance = (int)((256 * diff) >> v->log2_count);
}

// Combine the statistics of two equally-sized sibling nodes |a| and |b| into
// |r|, doubling the sample count (log2_count + 1).
// Made static: every sibling helper in this file is static, no header
// declares this symbol, and its only caller is fill_variance_tree() below —
// external linkage only polluted the global namespace.
static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}

// Compute the horz/vert/none statistics of one tree level by summing the
// statistics of its four quadrants.
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node vn;
  tree_to_node(data, bsize, &vn);
  // Horizontal partitions: top pair and bottom pair of quadrants.
  sum_2_variances(vn.split[0], vn.split[1], &vn.part_variances->horz[0]);
  sum_2_variances(vn.split[2], vn.split[3], &vn.part_variances->horz[1]);
  // Vertical partitions: left pair and right pair of quadrants.
  sum_2_variances(vn.split[0], vn.split[2], &vn.part_variances->vert[0]);
  sum_2_variances(vn.split[1], vn.split[3], &vn.part_variances->vert[1]);
  // Whole block: the two vertical halves together cover all four quadrants.
  sum_2_variances(&vn.part_variances->vert[0], &vn.part_variances->vert[1],
                  &vn.part_variances->none);
}

// Decide whether the variance-tree level |data| (size |bsize| at
// (mi_row, mi_col)) can be coded without further splitting, against
// |threshold|.  Returns 1 and writes the partition via set_block_size() when
// the block (or its horz/vert halves) is selected; returns 0 to request a
// split by the caller.
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int segment_id) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  // No 64x64 blocks on segments other than base (un-boosted) segment.
  if (cyclic_refresh_segment_id_boosted(segment_id) && bsize == BLOCK_64X64)
    return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    get_variance(&vt.part_variances->none);
    // For key frame or low_res: for bsize above 32X32 or very high variance,
    // take split.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
        vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    // The half-block bound checks ensure the block lies fully inside the
    // frame before it can be selected whole.
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}

Yaowu Xu's avatar
Yaowu Xu committed
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516

// Compute the variance-based-partition thresholds for quantizer index |q|.
// Only meaningful when the speed features select VAR_BASED_PARTITION.
void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
  SPEED_FEATURES *const sf = &cpi->sf;
  if (sf->partition_search_type != VAR_BASED_PARTITION) {
    return;
  } else {
    VP9_COMMON *const cm = &cpi->common;
    const VP9EncoderConfig *const oxcf = &cpi->oxcf;
    const int is_key_frame = (cm->frame_type == KEY_FRAME);
    const int use_4x4_partition = is_key_frame;
    const int low_res = (cm->width <= 352 && cm->height <= 288);
    // Key frames compare against a flat reference, hence the much larger
    // multiplier.
    const int threshold_multiplier = is_key_frame ? 80 : 4;
    const int64_t threshold_base = (int64_t)(threshold_multiplier *
        vp9_convert_qindex_to_q(q, cm->bit_depth));

    // Assign all three thresholds per frame class.
    if (is_key_frame) {
      cpi->vbp_threshold = threshold_base >> 2;
      cpi->vbp_threshold_bsize_min = threshold_base << 2;
      cpi->vbp_threshold_bsize_max = threshold_base;
    } else if (low_res) {
      cpi->vbp_threshold = threshold_base;
      cpi->vbp_threshold_bsize_min = threshold_base << 3;
      cpi->vbp_threshold_bsize_max = threshold_base >> 2;
    } else {
      cpi->vbp_threshold = threshold_base;
      cpi->vbp_threshold_bsize_min = threshold_base << oxcf->speed;
      cpi->vbp_threshold_bsize_max = threshold_base;
    }
    // TODO(marpan): Allow 4x4 partitions for inter-frames.
    // use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
    // If 4x4 partition is not used, then 8x8 partition will be selected
    // if variance of 16x16 block is very high, so use larger threshold
    // for 16x16 (threshold_bsize_min) in that case.
    cpi->vbp_threshold_16x16 = use_4x4_partition ? cpi->vbp_threshold
                                                 : cpi->vbp_threshold_bsize_min;
    cpi->vbp_bsize_min = use_4x4_partition ? BLOCK_8X8 : BLOCK_16X16;
  }
}

517
// This function chooses partitioning based on the variance between source and
518
// reconstructed last, where variance is computed for down-sampled inputs.
519
520
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
521
                                MACROBLOCK *x,
522
523
                                int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
524
  MACROBLOCKD *xd = &x->e_mbd;
525
  int i, j, k, m;
526
  v64x64 vt;
527
  v16x16 vt2[16];
528
529
530
531
532
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
Yaowu Xu's avatar
Yaowu Xu committed
533

534
  // Always use 4x4 partition for key frame.
535
536
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int use_4x4_partition = is_key_frame;
Yaowu Xu's avatar
Yaowu Xu committed
537
  const int low_res = (cm->width <= 352 && cm->height <= 288);
538
539
  int variance4x4downsample[16];

540
541
542
543
544
545
546
  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
    segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
  }

547
  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
548
549
550
551
552
553
554
555
556

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

557
  if (!is_key_frame) {
558
    MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
559
    unsigned int uv_sad;
560
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
561

562
563
    const YV12_BUFFER_CONFIG *yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    unsigned int y_sad, y_sad_g;
564
    BLOCK_SIZE bsize;
565
566
567
568
569
570
571
572
    if (mi_row + 4 < cm->mi_rows && mi_col + 4 < cm->mi_cols)
      bsize = BLOCK_64X64;
    else if (mi_row + 4 < cm->mi_rows && mi_col + 4 >= cm->mi_cols)
      bsize = BLOCK_32X64;
    else if (mi_row + 4 >= cm->mi_rows && mi_col + 4 < cm->mi_cols)
      bsize = BLOCK_64X32;
    else
      bsize = BLOCK_32X32;
573

574
    assert(yv12 != NULL);
575

576
577
578
579
580
581
582
583
584
585
    if (yv12_g && yv12_g != yv12) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }
586

Yaowu Xu's avatar
Yaowu Xu committed
587
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
588
                         &cm->frame_refs[LAST_FRAME - 1].sf);
589
590
591
592
    mbmi->ref_frame[0] = LAST_FRAME;
    mbmi->ref_frame[1] = NONE;
    mbmi->sb_type = BLOCK_64X64;
    mbmi->mv[0].as_int = 0;
593
    mbmi->interp_filter = BILINEAR;
594

595
    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
596
597
598
599
600
601
602
603
604
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mbmi->ref_frame[0] = GOLDEN_FRAME;
      mbmi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
    }
605

606
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
607
608
609
610

    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane  *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
Yaowu Xu's avatar
Yaowu Xu committed
611
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
612

Yaowu Xu's avatar
Yaowu Xu committed
613
      if (bs == BLOCK_INVALID)
614
        uv_sad = UINT_MAX;
Yaowu Xu's avatar
Yaowu Xu committed
615
616
617
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);
618

619
      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
620
    }
621
622
623
624
625
626

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
643
644
  }

645
646
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
647
648
649
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
650
    const int i2 = i << 2;
651
652
653
654
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
655
      variance4x4downsample[i2 + j] = 0;
656
      if (!is_key_frame) {
657
658
659
660
661
662
663
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
            unsigned int sse = 0;
            int sum = 0;
            if (x8_idx < pixels_wide && y8_idx < pixels_high) {
              int s_avg, d_avg;
664
#if CONFIG_VP9_HIGHBITDEPTH
665
666
667
668
669
670
671
672
              if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
                s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
                d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
              } else {
                s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
                d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
             }
#else
673
674
              s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
              d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
675
#endif
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
              sum = s_avg - d_avg;
              sse = sum * sum;
            }
            // If variance is based on 8x8 downsampling, we stop here and have
            // one sample for 8x8 block (so use 1 for count in fill_variance),
            // which of course means variance = 0 for 8x8 block.
            fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
        }
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        // For low-resolution, compute the variance based on 8x8 down-sampling,
        // and if it is large (above the threshold) we go down for 4x4.
        // For key frame we always go down to 4x4.
        if (low_res)
          get_variance(&vt.split[i].split[j].part_variances.none);
      }
691
      if (is_key_frame || (low_res &&
692
          vt.split[i].split[j].part_variances.none.variance >
Yaowu Xu's avatar
Yaowu Xu committed
693
          (cpi->vbp_threshold << 1))) {
694
695
696
697
698
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
699
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
700
              &vt2[i2 + j].split[k];
701
702
703
704
705
706
          for (m = 0; m < 4; m++) {
            int x4_idx = x8_idx + ((m & 1) << 2);
            int y4_idx = y8_idx + ((m >> 1) << 2);
            unsigned int sse = 0;
            int sum = 0;
            if (x4_idx < pixels_wide && y4_idx < pixels_high) {
707
              int d_avg = 128;
708
709
710
711
#if CONFIG_VP9_HIGHBITDEPTH
              int s_avg;
              if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
                s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
712
713
                if (cm->frame_type != KEY_FRAME)
                  d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
714
715
              } else {
                s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
716
717
                if (cm->frame_type != KEY_FRAME)
                  d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
718
719
              }
#else
720
              int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
721
              if (!is_key_frame)
722
                d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
723
#endif
724
              sum = s_avg - d_avg;
725
726
              sse = sum * sum;
            }
727
            // If variance is based on 4x4 down-sampling, we stop here and have
728
729
            // one sample for 4x4 block (so use 1 for count in fill_variance),
            // which of course means variance = 0 for 4x4 block.
730
            fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
731
          }
732
        }
733
734
735
      }
    }
  }
736

737
738
  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
739
    const int i2 = i << 2;
740
    for (j = 0; j < 4; j++) {
741
      if (variance4x4downsample[i2 + j] == 1) {
742
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
743
            &vt.split[i].split[j];
744
        for (m = 0; m < 4; m++) {
745
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
746
        }
747
        fill_variance_tree(vtemp, BLOCK_16X16);
748
      }
749
750
751
752
753
754
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
  }
  fill_variance_tree(&vt, BLOCK_64X64);

  // Now go through the entire structure,  splitting every block size until
755
  // we get to one that's got a variance lower than our threshold.
756
  if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
757
      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
758
759
                           cpi->vbp_threshold_bsize_max, BLOCK_16X16,
                           segment_id)) {
760
761
762
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
763
      const int i2 = i << 2;
764
      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
765
                               (mi_row + y32_idx), (mi_col + x32_idx),
Yaowu Xu's avatar
Yaowu Xu committed
766
                               cpi->vbp_threshold,
767
                               BLOCK_16X16, segment_id)) {
768
769
770
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
771
772
773
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partioning(), otherwise use vt.
774
          v16x16 *vtemp = (!is_key_frame &&
775
776
777
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
          if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
778
                                   mi_row + y32_idx + y16_idx,
779
                                   mi_col + x32_idx + x16_idx,
Yaowu Xu's avatar
Yaowu Xu committed
780
                                   cpi->vbp_threshold_16x16,
781
                                   cpi->vbp_bsize_min, segment_id)) {
782
783
784
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
785
786
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
787
788
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
789
                                         mi_col + x32_idx + x16_idx + x8_idx,
Yaowu Xu's avatar
Yaowu Xu committed
790
                                         cpi->vbp_threshold_bsize_min,
791
                                         BLOCK_8X8, segment_id)) {
792
793
794
795
                  set_block_size(cpi, xd,
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
796
797
798
799
800
801
                }
              } else {
                set_block_size(cpi, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
802
              }
803
804
805
806
807
808
809
810
            }
          }
        }
      }
    }
  }
}

811
812
// Commit the coding decisions held in |ctx| (the mode picked for the block at
// (mi_row, mi_col) of size |bsize|) back into the common encoder state:
// mode-info grid, segmentation, coefficient buffers, RD counters and the
// per-frame motion-vector reference array.
//
// Parameters:
//   cpi            - top-level encoder instance (common state, AQ config).
//   td             - per-thread data (RD counts, macroblock context).
//   ctx            - pick-mode context holding the chosen mode and buffers.
//   mi_row/mi_col  - block position in mode-info (8x8) units.
//   bsize          - block size of the coded unit.
//   output_enabled - nonzero when this call corresponds to a final (not
//                    speculative/RD-search) encode; stats are only updated
//                    in that case.
static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  MODE_INFO *mi_addr = &xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  // Clamp block extent in mode-info units to the frame border.
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  // Copy the picked mode info into the frame's mode-info grid.
  *mi_addr = *mi;
  mi_addr->src_mi = mi_addr;

  // If segmentation in use
  if (seg->enabled) {
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
        vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
                                        mi_col, bsize, ctx->rate, ctx->dist);
    }
  }

  // Point the coefficient buffers at the context's saved copies: slot [1]
  // for planes actually coded, slot [2] for the remaining (unused) planes.
  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that that was in place
  // when the mode was picked for it: every in-frame 8x8 cell of this block
  // points at the top-left mode info.
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
        && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis].src_mi = mi_addr;
      }

  // Segment id may have changed above, so re-derive the quantizers.
  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  // For sub-8x8 inter blocks, mirror the last sub-block's MVs into mbmi.
  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;
  vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
             sizeof(uint8_t) * ctx->num_4x4_blk);

  // Everything below only runs for a final encode pass.
  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    // Map intra prediction modes to the mode-chosen counter indices.
    static const int kf_mode_index[] = {
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
  } else {
    // Note how often each mode chosen as best
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(td);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
      }
    }

    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }

  // Record reference frames and MVs for every in-frame 8x8 cell so the next
  // frame can use them as temporal MV predictors.
  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
      mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
      mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
      mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
    }
  }
}

Jim Bankoski's avatar
Jim Bankoski committed
963
// Point the macroblock's per-plane source buffers at the Y/U/V planes of
// |src|, offset to the block at (mi_row, mi_col), and record |src| as the
// current frame buffer. The chroma planes share one stride.
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
  const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}

978
979
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
980
  MACROBLOCKD *const xd = &x->e_mbd;
hkuang's avatar
hkuang committed
981
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
982
983
984
  INTERP_FILTER filter_ref;

  if (xd->up_available)
hkuang's avatar
hkuang committed
985
    filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
986
  else if (xd->left_available)
hkuang's avatar
hkuang committed
987
    filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
  else
    filter_ref = EIGHTTAP;

  mbmi->sb_type = bsize;
  mbmi->mode = ZEROMV;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[tx_mode]);
  mbmi->skip = 1;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  mbmi->interp_filter = filter_ref;

hkuang's avatar
hkuang committed
1002
  xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0;
1003
1004
  x->skip = 1;

1005
  vp9_rd_cost_init(rd_cost);