/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

static void encode_superblock(VP9_COMP *cpi, ThreadData * td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);

// This is used as a reference when computing the source variance for the
//  purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
//  which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

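// Per-pixel source variance of a block, measured against the flat 128
// reference above (VP9_VAR_OFFS): the variance function returns a sum over
// the whole block, so it is shifted down by log2 of the pixel count.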
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

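// Map the 64x64 source-vs-last-frame per-pixel variance to a single fixed
// partition size: the smaller the difference variance, the larger the block
// size that is chosen.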
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}

static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
                                                      MACROBLOCK *x,
                                                      int mi_row,
                                                      int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 4)
    return BLOCK_64X64;
  else if (var < 10)
    return BLOCK_32X32;
  else
    return BLOCK_16X16;
}

// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi + idx_str;
  xd->mi[0].src_mi = &xd->mi[0];
}

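// Full per-block setup before encoding a block: skip context, mode-info
// pointers, destination and source planes, motion vector range limits,
// distance to the frame edge, RD multipliers, and the segment id with its
// quantizers.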
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0].src_mi->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // Mv beyond the range do not produce new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i].src_mi = &xd->mi[0];
    }
}

static void set_block_size(VP9_COMP * const cpi,
                           MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0].src_mi->mbmi.sb_type = bsize;
  }
}

typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

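// The structs above form a variance pyramid for one 64x64 superblock: each
// vNxN node keeps the variance of the whole block (part_variances.none), of
// its two horizontal and two vertical halves (horz/vert), and holds four
// children for the quad-split. tree_to_node() below exposes one level of that
// pyramid generically as a parent plus its four child variances.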
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *vt = (v4x4 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}

// Set variance values given sum square error, sum error, and log2 of count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

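// Scaled sample variance over n = 1 << log2_count accumulated samples:
//   variance = 256 * (sum_sq - sum * sum / n) / n
// implemented with shifts since n is a power of two. For example, a 16x16
// node built by summing four 8x8 samples has log2_count == 2 here.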
static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}

static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}

static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

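// Tries to place a partition at this level of the variance tree. Returns 1 if
// a block size was set (whole block, or a horizontal/vertical split into two
// halves), and 0 if the caller should instead recurse into the four quarters.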
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split)
    return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance is already computed for 32x32 blocks to set the force_split.
    if (bsize != BLOCK_32X32)
      get_variance(&vt.part_variances->none);
    // For key frame or low_res: for bsize above 32X32 or very high variance,
    // take split.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
        vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold) {
        BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}


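// Variance-based-partition thresholds, scaled from the AC dequant step for
// the given q (y_dequant[q][1]). Key frames use a much larger base multiplier
// (20x): for key frames choose_partitioning() measures variance against a
// flat 128 reference rather than the last reconstructed frame, so the values
// are on a different scale.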
void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) {
  SPEED_FEATURES *const sf = &cpi->sf;
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    VP9_COMMON *const cm = &cpi->common;
    const int is_key_frame = (cm->frame_type == KEY_FRAME);
    const int threshold_multiplier = is_key_frame ? 20 : 1;
    const int64_t threshold_base = (int64_t)(threshold_multiplier *
        cpi->y_dequant[q][1]);

    // TODO(marpan): Allow 4x4 partitions for inter-frames.
    // use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
    // If 4x4 partition is not used, then 8x8 partition will be selected
    // if variance of 16x16 block is very high, so use larger threshold
    // for 16x16 (threshold_bsize_min) in that case.
    if (is_key_frame) {
      cpi->vbp_threshold_64x64 = threshold_base;
      cpi->vbp_threshold_32x32 = threshold_base >> 2;
      cpi->vbp_threshold_16x16 = threshold_base >> 2;
      cpi->vbp_threshold_8x8 = threshold_base << 2;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      cpi->vbp_threshold_32x32 = threshold_base;
      if (cm->width <= 352 && cm->height <= 288) {
        cpi->vbp_threshold_64x64 = threshold_base >> 2;
        cpi->vbp_threshold_16x16 = threshold_base << 3;
      } else {
        cpi->vbp_threshold_64x64 = threshold_base;
        cpi->vbp_threshold_16x16 = threshold_base << cpi->oxcf.speed;
      }
      cpi->vbp_bsize_min = BLOCK_16X16;
    }
  }
}

// This function chooses partitioning based on the variance between the source
// and the reconstructed last frame, where variance is computed for
// down-sampled inputs.
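// Rough flow (see the code below): pick LAST or GOLDEN as the reference from
// a 64x64 SAD and build the inter predictor (key frames instead compare the
// source against a flat 128 block), fill the bottom of the variance tree from
// 8x8 (or 4x4) block averages of source minus prediction, sum the tree
// upwards, then walk back down with set_vt_partitioning(), taking the largest
// block size whose variance falls under the per-size threshold.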
static void choose_partitioning(VP9_COMP *cpi,
                                const TileInfo *const tile,
                                MACROBLOCK *x,
                                int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 vt2[16];
  int force_split[5];
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;

  // Always use 4x4 partition for key frame.
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int use_4x4_partition = is_key_frame;
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];

  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
    segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
  }

  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (!is_key_frame) {
    MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
    unsigned int uv_sad;
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    unsigned int y_sad, y_sad_g;
    BLOCK_SIZE bsize;
    if (mi_row + 4 < cm->mi_rows && mi_col + 4 < cm->mi_cols)
      bsize = BLOCK_64X64;
    else if (mi_row + 4 < cm->mi_rows && mi_col + 4 >= cm->mi_cols)
      bsize = BLOCK_32X64;
    else if (mi_row + 4 >= cm->mi_rows && mi_col + 4 < cm->mi_cols)
      bsize = BLOCK_64X32;
    else
      bsize = BLOCK_32X32;

    assert(yv12 != NULL);

    if (yv12_g && yv12_g != yv12) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                         &cm->frame_refs[LAST_FRAME - 1].sf);
    mbmi->ref_frame[0] = LAST_FRAME;
    mbmi->ref_frame[1] = NONE;
    mbmi->sb_type = BLOCK_64X64;
    mbmi->mv[0].as_int = 0;
    mbmi->interp_filter = BILINEAR;

    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mbmi->ref_frame[0] = GOLDEN_FRAME;
      mbmi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
    }

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane  *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

      if (bs == BLOCK_INVALID)
        uv_sad = UINT_MAX;
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);

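      // Flag this chroma plane as color sensitive when its SAD exceeds a
      // quarter of the luma SAD for the 64x64 block.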
      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
    }

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  // Index for force_split: 0 for the 64x64 block, 1-4 for the 32x32 blocks.
  force_split[0] = 0;
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      v16x16 *vst = &vt.split[i].split[j];
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          unsigned int sse = 0;
          int sum = 0;
          if (x8_idx < pixels_wide && y8_idx < pixels_high) {
            int s_avg, d_avg;
#if CONFIG_VP9_HIGHBITDEPTH
            if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
              s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
              d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
            } else {
              s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
              d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
            }
#else
            s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
            d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
            sum = s_avg - d_avg;
            sse = sum * sum;
          }
          // If variance is based on 8x8 downsampling, we stop here and have
          // one sample per 8x8 block (log2_count of 0, i.e. a count of 1, is
          // passed to fill_variance), which of course means variance = 0 for
          // the 8x8 block.
          fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
        }
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        // For low-resolution, compute the variance based on 8x8 down-sampling,
        // and if it is large (above the threshold) we go down for 4x4.
        // For key frame we always go down to 4x4.
        if (low_res)
          get_variance(&vt.split[i].split[j].part_variances.none);
      }
      if (is_key_frame || (low_res &&
          vt.split[i].split[j].part_variances.none.variance >
          (cpi->vbp_threshold_32x32 << 1))) {
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
              &vt2[i2 + j].split[k];
          for (m = 0; m < 4; m++) {
            int x4_idx = x8_idx + ((m & 1) << 2);
            int y4_idx = y8_idx + ((m >> 1) << 2);
            unsigned int sse = 0;
            int sum = 0;
            if (x4_idx < pixels_wide && y4_idx < pixels_high) {
              int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
              int s_avg;
              if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
                s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
                if (cm->frame_type != KEY_FRAME)
                  d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
              } else {
                s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
                if (cm->frame_type != KEY_FRAME)
                  d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
              }
#else
              int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
              if (!is_key_frame)
                d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
              sum = s_avg - d_avg;
              sse = sum * sum;
            }
            // If variance is based on 4x4 down-sampling, we stop here and
            // have one sample per 4x4 block (log2_count of 0, i.e. a count of
            // 1, is passed to fill_variance), which of course means
            // variance = 0 for the 4x4 block.
            fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
          }
        }
      }
    }
  }

  // No 64x64 blocks on segments other than base (un-boosted) segment,
  // so force split.
  if (cyclic_refresh_segment_id_boosted(segment_id))
    force_split[0] = 1;

  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
            &vt.split[i].split[j];
        for (m = 0; m < 4; m++)
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, force the block
    // to split. This also forces a split on the upper (64x64) level.
    get_variance(&vt.split[i].part_variances.none);
    if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold_32x32) {
      force_split[i + 1] = 1;
      force_split[0] = 1;
    }
  }
  if (!force_split[0])
    fill_variance_tree(&vt, BLOCK_64X64);

  // Now go through the entire structure, splitting every block size until
  // we get to one that's got a variance lower than our threshold.
  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                           cpi->vbp_threshold_64x64, BLOCK_16X16,
                           force_split[0])) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      const int i2 = i << 2;
      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx),
                               cpi->vbp_threshold_32x32,
                               BLOCK_16X16, force_split[i + 1])) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partitioning(), otherwise use vt.
          v16x16 *vtemp = (!is_key_frame &&
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
          if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
                                   mi_row + y32_idx + y16_idx,
                                   mi_col + x32_idx + x16_idx,
                                   cpi->vbp_threshold_16x16,
                                   cpi->vbp_bsize_min, 0)) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx,
                                         cpi->vbp_threshold_8x8,
                                         BLOCK_8X8, 0)) {
                  set_block_size(cpi, xd,
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
                }
              } else {
                set_block_size(cpi, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
              }
            }
          }
        }
      }
    }
  }
}

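// Commits the mode decision held in the pick-mode context (ctx->mic) for this
// block: copies it into the frame-wide mode-info array and, when segmentation
// is enabled, refreshes the segment id (complexity AQ re-reads it from the
// segment map; cyclic refresh updates the map and segment via
// vp9_cyclic_refresh_update_segment()).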
static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  MODE_INFO *mi_addr = &xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  *mi_addr = *mi;
  mi_addr->src_mi = mi_addr;

  // If segmentation in use
  if (seg->enabled) {
    // For in frame complexity AQ copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
        vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else for cyclic refresh mode update the segment map, set the segment id
    // and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
                                        mi_col, bsize, ctx->rate, ctx->dist,
                                        x->skip);
    }
  }

  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {