vp9_encodeframe.c 157 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9
10
 */

Dmitry Kovalev's avatar
Dmitry Kovalev committed
11
12
13
14
#include <limits.h>
#include <math.h>
#include <stdio.h>

Jim Bankoski's avatar
Jim Bankoski committed
15
#include "./vp9_rtcd.h"
Johann's avatar
Johann committed
16
#include "./vpx_dsp_rtcd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
17
18
#include "./vpx_config.h"

19
#include "vpx_dsp/vpx_dsp_common.h"
20
#include "vpx_ports/mem.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
21
#include "vpx_ports/vpx_timer.h"
22
#include "vpx_ports/system_state.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
23

24
#include "vp9/common/vp9_common.h"
Yaowu Xu's avatar
Yaowu Xu committed
25
#include "vp9/common/vp9_entropy.h"
26
#include "vp9/common/vp9_entropymode.h"
27
#include "vp9/common/vp9_idct.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
28
29
30
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
31
#include "vp9/common/vp9_reconintra.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
32
#include "vp9/common/vp9_reconinter.h"
33
#include "vp9/common/vp9_seg_common.h"
34
#include "vp9/common/vp9_tile_common.h"
35

36
#include "vp9/encoder/vp9_aq_complexity.h"
Marco Paniconi's avatar
Marco Paniconi committed
37
38
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
39
40
41
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
42
#include "vp9/encoder/vp9_ethread.h"
43
#include "vp9/encoder/vp9_extend.h"
44
#include "vp9/encoder/vp9_pickmode.h"
45
#include "vp9/encoder/vp9_rd.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
46
47
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
48
#include "vp9/encoder/vp9_tokenize.h"
49

50
51
static void encode_superblock(VP9_COMP *cpi, ThreadData * td,
                              TOKENEXTRA **t, int output_enabled,
52
53
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);
54

55
56
57
58
// This is used as a reference when computing the source variance for the
//  purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
//  which will be faster.
// 128 is the mid-gray value for 8-bit content, so variance against this
// flat block measures the AC energy of the source.
static const uint8_t VP9_VAR_OFFS[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth analogues of VP9_VAR_OFFS: flat mid-gray reference blocks
// used when computing source variance. The mid-gray value scales with bit
// depth: 128 for 8-bit, 128*4 for 10-bit, 128*16 for 12-bit.
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};

// 10-bit mid-gray (128 << 2).
static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
    128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

// 12-bit mid-gray (128 << 4).
static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
    128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH

105
106
107
// Per-pixel variance of the source block, measured against a flat mid-gray
// reference (VP9_VAR_OFFS).
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int unused_sse;
  const unsigned int variance =
      cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &unused_sse);
  // Normalize total variance to a per-pixel value.
  return ROUND_POWER_OF_TWO(variance, num_pels_log2_lookup[bs]);
}

114
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth variant of vp9_get_sby_perpixel_variance: picks the flat
// mid-gray reference matching the coding bit depth. Any unexpected bd falls
// back to the 8-bit table, matching the original switch default.
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  const uint16_t *offs = VP9_HIGH_VAR_OFFS_8;
  unsigned int sse;
  unsigned int variance;
  if (bd == 10)
    offs = VP9_HIGH_VAR_OFFS_10;
  else if (bd == 12)
    offs = VP9_HIGH_VAR_OFFS_12;
  variance = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                CONVERT_TO_BYTEPTR(offs), 0, &sse);
  return ROUND_POWER_OF_TWO(variance, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

140
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
141
142
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
143
                                                   BLOCK_SIZE bs) {
144
145
  unsigned int sse, var;
  uint8_t *last_y;
146
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
147
148
149
150
151

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
152
153
154
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

155
// Map the 64x64 source-vs-last-frame variance to a fixed partition size:
// lower variance picks a larger block.
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col) {
  const unsigned int var = get_sby_perpixel_diff_variance(
      cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64);
  if (var >= 2048) return BLOCK_8X8;
  if (var >= 128) return BLOCK_16X16;
  if (var >= 8) return BLOCK_32X32;
  return BLOCK_64X64;
}

171
172
// Lighter version of set_offsets that only sets the mode info
// pointers.
Jingning Han's avatar
Jingning Han committed
173
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
174
                                         MACROBLOCK *const x,
Jingning Han's avatar
Jingning Han committed
175
176
177
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
178
  const int idx_str = xd->mi_stride * mi_row + mi_col;
179
180
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
181
  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
182
183
184
}

// Prepare the encoder's per-block state (mode-info pointers, dst/src plane
// pointers, MV limits, edge distances, RD constants, and segment ID) before
// encoding the block at (mi_row, mi_col) of the given size.
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // Mv beyond the range do not produce new/different prediction block.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  // Blocks must be aligned to their own size within the MI grid.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Setup segment ID.
  if (seg->enabled) {
    // Under VARIANCE_AQ the segment ID is assigned elsewhere; here it is
    // read from the (updated or last-frame) segmentation map.
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }

  // required by vp9_append_sub8x8_mvs_for_idx() and vp9_find_best_ref_mvs()
  xd->tile = *tile;
}

241
242
243
// Replicate the top-left mode-info pointer across every 8x8 unit covered by
// the block, skipping units that fall outside the frame.
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  int r, c;
  for (r = 0; r < bh; ++r) {
    for (c = 0; c < bw; ++c) {
      if (mi_row + r < cm->mi_rows && mi_col + c < cm->mi_cols)
        xd->mi[r * xd->mi_stride + c] = xd->mi[0];
    }
  }
}

static void set_block_size(VP9_COMP * const cpi,
255
                           MACROBLOCK *const x,
256
                           MACROBLOCKD *const xd,
257
258
259
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
260
    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
261
    xd->mi[0]->mbmi.sb_type = bsize;
262
263
264
265
266
267
  }
}

// Running first/second moments for one set of samples. log2_count is the
// log2 of the number of samples folded in; variance is derived from the
// moments by get_variance().
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

// Variance summaries for a block: whole block (none) plus its two
// horizontal and two vertical halves.
typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

// Variance tree nodes: each level holds its own partition summary plus four
// children of the next-smaller size. The 4x4 leaves hold plain var entries.
typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

// Size-agnostic view of a tree node used by tree_to_node(): the node's own
// summary plus pointers to its four children's whole-block variances.
typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

// Build a size-agnostic variance_node view of a variance-tree node: its own
// partition summary plus the whole-block variance of each of its four
// children. `data` must point to the v64x64/v32x32/.../v4x4 matching bsize.
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int k;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *const vt = (v64x64 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *const vt = (v32x32 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *const vt = (v16x16 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *const vt = (v8x8 *)data;
      node->part_variances = &vt->part_variances;
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *const vt = (v4x4 *)data;
      node->part_variances = &vt->part_variances;
      // 4x4 children are leaf var entries rather than nested trees.
      for (k = 0; k < 4; ++k)
        node->split[k] = &vt->split[k];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
// Note: v->variance itself is left untouched; it is derived on demand by
// get_variance().
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_error = s;
  v->sum_square_error = s2;
  v->log2_count = c;
}

// Derive variance from the accumulated moments:
// variance = 256 * (SSE - E^2 / N) / N, where N = 2^log2_count and the
// divisions are shifts.
static void get_variance(var *v) {
  const int64_t avg_sq_err = (v->sum_error * v->sum_error) >> v->log2_count;
  v->variance =
      (int)(256 * (v->sum_square_error - avg_sq_err) >> v->log2_count);
}

372
// Fold two equally-sized partitions into one; the merged sample count
// doubles, so log2_count increases by one.
static void sum_2_variances(const var *a, const var *b, var *r) {
  const int64_t s2 = a->sum_square_error + b->sum_square_error;
  const int64_t s = a->sum_error + b->sum_error;
  assert(a->log2_count == b->log2_count);
  fill_variance(s2, s, a->log2_count + 1, r);
}

// Roll up the four child variances of a tree node into its half-block
// (horz/vert) and whole-block (none) summaries. Children are numbered
// 0..3 in raster order.
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  memset(&node, 0, sizeof(node));
  tree_to_node(data, bsize, &node);
  // Vertical halves: left = {0, 2}, right = {1, 3}.
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  // Horizontal halves: top = {0, 1}, bottom = {2, 3}.
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  // Whole block from the two vertical halves (covers all four children).
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}

// Decide, from the precomputed variance tree, whether the block at
// (mi_row, mi_col) can be coded at `bsize` (or one of its half-block
// splits) without further partitioning. Returns 1 and records the block
// size(s) if a partition was selected here, 0 if the caller must split.
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCK *const x,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON * const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  // Caller already decided this level must split.
  if (force_split == 1)
    return 0;

  // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
  // variance is below threshold, otherwise split will be selected.
  // No check for vert/horiz split as too few samples for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    // For key frame: take split for bsize above 32X32 or very high variance.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
        vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      // Both halves must be low-variance and the subsize valid for the
      // chroma subsampling.
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }
    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}

Marco's avatar
Marco committed
475
476
477
478
479
480
481
482
// Set the variance split thresholds for following the block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frame.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  // Key frames use a much larger base threshold (more splitting pressure
  // comes from the << 4 test in set_vt_partitioning instead).
  const int threshold_multiplier = is_key_frame ? 20 : 1;
  // Base threshold scales with the AC dequant step for this qindex.
  int64_t threshold_base = (int64_t)(threshold_multiplier *
      cpi->y_dequant[q][1]);
  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    // Increase base variance threshold based on estimated noise level.
    if (cpi->noise_estimate.enabled) {
      NOISE_LEVEL noise_level = vp9_noise_estimate_extract_level(
          &cpi->noise_estimate);
      if (noise_level == kHigh)
        threshold_base = 3 * threshold_base;
      else if (noise_level == kMedium)
        threshold_base = threshold_base << 1;
    }
    // Low resolutions (<= CIF) get looser 16x16 and tighter 64x64 splitting.
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 3;
      thresholds[1] = threshold_base >> 1;
      thresholds[2] = threshold_base << 3;
    } else {
      thresholds[0] = threshold_base;
      thresholds[1] = (5 * threshold_base) >> 2;
      if (cm->width >= 1920 && cm->height >= 1080)
        thresholds[1] = (7 * threshold_base) >> 2;
      // Higher speed settings split 16x16 blocks less readily.
      thresholds[2] = threshold_base << cpi->oxcf.speed;
    }
  }
}

// Refresh the encoder-wide variance-based-partition thresholds for qindex q.
// Only meaningful for the variance/reference partition search types.
void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION)
    return;
  set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
  // The thresholds below are not changed locally.
  if (is_key_frame) {
    cpi->vbp_threshold_sad = 0;
    cpi->vbp_bsize_min = BLOCK_8X8;
  } else {
    if (cm->width <= 352 && cm->height <= 288)
      cpi->vbp_threshold_sad = 10;
    else
      cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
          (cpi->y_dequant[q][1] << 1) : 1000;
    cpi->vbp_bsize_min = BLOCK_16X16;
  }
  cpi->vbp_threshold_minmax = 15 + (q >> 3);
}

539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
// Compute the minmax over the 8x8 subblocks.
// Returns the spread between the largest and smallest (max - min) range
// found among the four 8x8 subblocks of a 16x16 area; subblocks outside the
// visible frame are skipped.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide,
                              int pixels_high) {
  int idx;
  int largest_range = 0;
  // NOTE(review): initialized to 255 even for high bit depth, where an 8x8
  // (max - min) can exceed 255 — confirm this clamp is intended.
  int smallest_range = 255;
  // Visit the 4 8x8 subblocks in raster order.
  for (idx = 0; idx < 4; ++idx) {
    const int x8_idx = x16_idx + ((idx & 1) << 3);
    const int y8_idx = y16_idx + ((idx >> 1) << 3);
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int min = 0;
      int max = 0;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp,
                              &min, &max);
      } else {
        vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                       d + y8_idx * dp + x8_idx, dp,
                       &min, &max);
      }
#else
      vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                     d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if (max - min > largest_range)
        largest_range = max - min;
      if (max - min < smallest_range)
        smallest_range = max - min;
    }
  }
  return largest_range - smallest_range;
}

581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
// Fill the four 4x4 leaves of an 8x8 variance-tree node. Each leaf records
// the squared difference between the 4x4 source average and the 4x4
// reference average (log2_count 0: one sample per leaf). Leaves outside the
// visible frame get zero moments.
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int x4_idx = x8_idx + ((idx & 1) << 2);
    const int y4_idx = y8_idx + ((idx >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      // On key frames the reference side stays at flat mid-gray (128);
      // otherwise it is replaced by the actual 4x4 reference average.
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame)
        d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[idx].part_variances.none);
  }
}

// Fill the four 8x8 leaves of a 16x16 variance-tree node. Each leaf records
// the squared difference between the 8x8 source average and the 8x8
// reference average (log2_count 0: one sample per leaf). Leaves outside the
// visible frame get zero moments.
static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int x8_idx = x16_idx + ((idx & 1) << 3);
    const int y8_idx = y16_idx + ((idx >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      // On key frames the reference side stays at flat mid-gray (128);
      // otherwise it is replaced by the actual 8x8 reference average.
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame)
        d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[idx].part_variances.none);
  }
}

659
// This function chooses partitioning based on the variance between source and
660
// reconstructed last, where variance is computed for down-sampled inputs.
661
static int choose_partitioning(VP9_COMP *cpi,
662
                                const TileInfo *const tile,
663
                                MACROBLOCK *x,
664
665
                                int mi_row, int mi_col) {
  VP9_COMMON * const cm = &cpi->common;
666
  MACROBLOCKD *xd = &x->e_mbd;
667
  int i, j, k, m;
668
  v64x64 vt;
669
  v16x16 vt2[16];
670
  int force_split[21];
671
672
673
674
675
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
676
677
  int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
      cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};
Yaowu Xu's avatar
Yaowu Xu committed
678

679
  // Always use 4x4 partition for key frame.
680
681
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int use_4x4_partition = is_key_frame;
Yaowu Xu's avatar
Yaowu Xu committed
682
  const int low_res = (cm->width <= 352 && cm->height <= 288);
683
684
  int variance4x4downsample[16];

685
686
687
688
  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
689
    segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
690
691
692

    if (cyclic_refresh_segment_id_boosted(segment_id)) {
      int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
Marco's avatar
Marco committed
693
      set_vbp_thresholds(cpi, thresholds, q);
694
    }
695
696
  }

697
  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
698
699
700
701
702
703
704
705
706

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

707
708
709
710
711
  if (!is_key_frame && !(is_one_pass_cbr_svc(cpi) &&
      cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this code.
712
    MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
713
    unsigned int uv_sad;
714
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
715

716
    const YV12_BUFFER_CONFIG *yv12_g = NULL;
717
    unsigned int y_sad, y_sad_g;
Yaowu Xu's avatar
Yaowu Xu committed
718
719
    const BLOCK_SIZE bsize = BLOCK_32X32
        + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);
720

721
    assert(yv12 != NULL);
722
723
724
725
726
727
728

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id)) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

729
730
    if (yv12_g && yv12_g != yv12 &&
       (cpi->ref_frame_flags & VP9_GOLD_FLAG)) {
731
732
733
734
735
736
737
738
739
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }
740

Yaowu Xu's avatar
Yaowu Xu committed
741
    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
742
                         &cm->frame_refs[LAST_FRAME - 1].sf);
743
744
745
746
    mbmi->ref_frame[0] = LAST_FRAME;
    mbmi->ref_frame[1] = NONE;
    mbmi->sb_type = BLOCK_64X64;
    mbmi->mv[0].as_int = 0;
747
    mbmi->interp_filter = BILINEAR;
748

749
    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
750
751
752
753
754
755
756
757
758
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mbmi->ref_frame[0] = GOLDEN_FRAME;
      mbmi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
    }
759

760
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
761
762
763
764

    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane  *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
Yaowu Xu's avatar
Yaowu Xu committed
765
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
766

Yaowu Xu's avatar
Yaowu Xu committed
767
      if (bs == BLOCK_INVALID)
768
        uv_sad = UINT_MAX;
Yaowu Xu's avatar
Yaowu Xu committed
769
770
771
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);
772

773
      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
774
    }
775
776
777

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
778
779
780
781
782
783
784
785
786

    // If the y_sad is very small, take 64x64 as partition and exit.
    // Don't check on boosted segment for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE &&
        y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
787
        set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
788
789
790
        return 0;
      }
    }
791
792
793
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
810
811
  }

Marco's avatar
Marco committed
812
  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
813
  // 5-20 for the 16x16 blocks.
Marco's avatar
Marco committed
814
  force_split[0] = 0;
815
816
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
817
818
819
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
820
    const int i2 = i << 2;
Marco's avatar
Marco committed
821
    force_split[i + 1] = 0;
822
823
824
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
825
      const int split_index = 5 + i2 + j;
826
      v16x16 *vst = &vt.split[i].split[j];
827
      force_split[split_index] = 0;
828
      variance4x4downsample[i2 + j] = 0;
829
      if (!is_key_frame) {
830
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
831
#if CONFIG_VP9_HIGHBITDEPTH
832
                            xd->cur_buf->flags,
833
#endif
834
835
836
                            pixels_wide,
                            pixels_high,
                            is_key_frame);
837
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
838
839
840
841
842
843
844
845
        get_variance(&vt.split[i].split[j].part_variances.none);
        if (vt.split[i].split[j].part_variances.none.variance >
            thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to 8x8
          // for this 16x16 block (this also forces splits for upper levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
846
847
        } else if (cpi->oxcf.speed < 8 &&
                   vt.split[i].split[j].part_variances.none.variance >
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
                   thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average),
          // compute the minmax over the 8x8 sub-blocks, and if above threshold,
          // force split to 8x8 block for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          if (minmax > cpi->vbp_threshold_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
864
      }
Marco's avatar
Marco committed
865
866
867
      // TODO(marpan): There is an issue with variance based on 4x4 average in
      // svc mode, don't allow it for now.
      if (is_key_frame || (low_res && !cpi->use_svc &&
868
          vt.split[i].split[j].part_variances.none.variance >
869
          (thresholds[1] << 1))) {
870
        force_split[split_index] = 0;
871
872
873
874
875
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
876
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
877
              &vt2[i2 + j].split[k];
878
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
879
#if CONFIG_VP9_HIGHBITDEPTH
880
                               xd->cur_buf->flags,
881
#endif
882
883
884
                               pixels_wide,
                               pixels_high,
                               is_key_frame);
885
        }
886
887
888
      }
    }
  }
889

890
891
  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
892
    const int i2 = i << 2;
893
    for (j = 0; j < 4; j++) {
894
      if (variance4x4downsample[i2 + j] == 1) {
895
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
896
            &vt.split[i].split[j];
Marco's avatar
Marco committed
897
        for (m = 0; m < 4; m++)
898
899
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
900
901
902
903
904
905
906
907
        // If variance of this 16x16 block is above the threshold, force block
        // to split. This also forces a split on the upper levels.
        get_variance(&vtemp->part_variances.none);
        if (vtemp->part_variances.none.variance > thresholds[2]) {
          force_split[5 + i2 + j] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        }
908
      }
909
910
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
Marco's avatar
Marco committed
911
912
    // If variance of this 32x32 block is above the threshold, force the block
    // to split. This also forces a split on the upper (64x64) level.
913
914
915
916
917
918
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      if (vt.split[i].part_variances.none.variance > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
Marco's avatar
Marco committed
919
    }
920
  }
921
  if (!force_split[0]) {
Marco's avatar
Marco committed
922
    fill_variance_tree(&vt, BLOCK_64X64);
923
924
    get_variance(&vt.part_variances.none);
  }
925

926
  // Now go through the entire structure, splitting every block size until
927
  // we get to one that's got a variance lower than our threshold.
928
  if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
929
      !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
930
                           thresholds[0], BLOCK_16X16, force_split[0])) {
931
932
933
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
934
      const int i2 = i << 2;
935
      if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
936
                               (mi_row + y32_idx), (mi_col + x32_idx),
937
938
                               thresholds[1], BLOCK_16X16,
                               force_split[i + 1])) {
939
940
941
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
942
943
944
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use vt2
          // in set_vt_partioning(), otherwise use vt.
945
          v16x16 *vtemp = (!is_key_frame &&
946
947
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
948
          if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
949
                                   mi_row + y32_idx + y16_idx,
950
                                   mi_col + x32_idx + x16_idx,
951
952
953
                                   thresholds[2],
                                   cpi->vbp_bsize_min,
                                   force_split[5 + i2  + j])) {
954
955
956
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
957
              if (use_4x4_partition) {
958
                if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
959
960
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
961
                                         mi_col + x32_idx + x16_idx + x8_idx,
962
                                         thresholds[3], BLOCK_8X8, 0)) {
963
                  set_block_size(cpi, x, xd,
964
965
966
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
967
968
                }
              } else {
969
                set_block_size(cpi, x, xd,
970
971
972
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);