vp9_pickmode.c 50.2 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
12
13
14
#include <limits.h>
#include <math.h>
#include <stdio.h>
15

16
17
18
19
#include "./vp9_rtcd.h"

#include "vpx_mem/vpx_mem.h"

20
#include "vp9/common/vp9_blockd.h"
21
22
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_mvref_common.h"
23
24
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
25

Dmitry Kovalev's avatar
Dmitry Kovalev committed
26
#include "vp9/encoder/vp9_encoder.h"
27
#include "vp9/encoder/vp9_pickmode.h"
28
#include "vp9/encoder/vp9_ratectrl.h"
29
#include "vp9/encoder/vp9_rd.h"
30

31
32
33
34
35
36
// Scratch prediction buffer used to cache inter predictors so the best
// one can be reused at encode time instead of being regenerated.
typedef struct {
  uint8_t *data;  // pixel storage (may be a CONVERT_TO_BYTEPTR'd 16-bit buf)
  int stride;     // row stride of |data| in pixels
  int in_use;     // nonzero while checked out via get_pred_buffer()
} PRED_BUFFER;

37
// Lightweight real-time variant of vp9_find_mv_refs(): scans the spatial
// neighbors of |mi| for motion-vector candidates of |ref_frame|, filling
// |mv_ref_list| (MAX_MV_REF_CANDIDATES entries) and recording the mode
// context in mi->mbmi.mode_context[ref_frame].
// Returns a "const motion" flag: 1 once the two nearest neighbors have been
// scanned without an early exit, 0 if the ADD_MV_REF_LIST macro jumped to
// Done from within the first loop (i.e. the list filled up immediately).
static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                      const TileInfo *const tile,
                      MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                      int_mv *mv_ref_list,
                      int mi_row, int mi_col) {
  const int *ref_sign_bias = cm->ref_frame_sign_bias;
  int i, refmv_count = 0;

  const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];

  int different_ref_found = 0;
  int context_counter = 0;
  int const_motion = 0;

  // Blank the reference vector list
  vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);

  // The nearest 2 blocks are treated differently
  // if the size < 8x8 we get the mv from the bmi substructure,
  // and we also need to keep a mode count.
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
                                                   xd->mi_stride].src_mi;
      const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate->mode];
      different_ref_found = 1;

      // NOTE: ADD_MV_REF_LIST jumps to Done once the list is full, which
      // skips the const_motion = 1 assignment below.
      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, -1),
                        refmv_count, mv_ref_list, Done);
    }
  }

  const_motion = 1;

  // Check the rest of the neighbors in much the same way
  // as before except we don't need to keep track of sub blocks or
  // mode counts.
  for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
                                                    xd->mi_stride].src_mi->mbmi;
      different_ref_found = 1;

      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, Done);
    }
  }

  // Since we couldn't find 2 mvs from the same reference frame
  // go back through the neighbors and find motion vectors from
  // different reference frames.
  if (different_ref_found && !refmv_count) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
                                              * xd->mi_stride].src_mi->mbmi;

        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
                                 refmv_count, mv_ref_list, Done);
      }
    }
  }

 Done:

  mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];

  // Clamp vectors
  for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);

  return const_motion;
}

118
119
120
121
// Full-pel + (conditionally) sub-pel motion search for NEWMV in the
// non-RD path. Writes the found motion vector to |tmp_mv| and its bit
// cost to |rate_mv|. Returns 1 if the mv-rate alone is cheap enough
// (RDCOST of rate_mv + NEWMV mode rate <= |best_rd_sofar|) so the
// sub-pel refinement was run; 0 means the caller should reject NEWMV.
static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv,
                                  int64_t best_rd_sofar) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
  struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}};
  const int step_param = cpi->sf.mv.fullpel_search_step_param;
  const int sadpb = x->sadperbit16;
  MV mvp_full;
  const int ref = mbmi->ref_frame[0];
  const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
  int dis;
  int rate_mode;
  // Save the mv search range; vp9_set_mv_search_range() below narrows it
  // and it must be restored before returning.
  const int tmp_col_min = x->mv_col_min;
  const int tmp_col_max = x->mv_col_max;
  const int tmp_row_min = x->mv_row_min;
  const int tmp_row_max = x->mv_row_max;
  int rv = 0;
  int cost_list[5];
  const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi,
                                                                        ref);
  if (scaled_ref_frame) {
    int i;
    // Swap out the reference frame for a version that's been scaled to
    // match the resolution of the current frame, allowing the existing
    // motion search code to be used without additional modifications.
    for (i = 0; i < MAX_MB_PLANE; i++)
      backup_yv12[i] = xd->plane[i].pre[0];
    vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
  }
  vp9_set_mv_search_range(x, &ref_mv);

  // Pick the full-pel search starting point: the best stored ref mv if
  // available, otherwise the block's predicted mv.
  assert(x->mv_best_ref_index[ref] <= 2);
  if (x->mv_best_ref_index[ref] < 2)
    mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
  else
    mvp_full = x->pred_mv[ref];

  // Convert from 1/8-pel units to full-pel units.
  mvp_full.col >>= 3;
  mvp_full.row >>= 3;

  vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
                        cond_cost_list(cpi, cost_list),
                        &ref_mv, &tmp_mv->as_mv, INT_MAX, 0);

  // Restore the original mv search range.
  x->mv_col_min = tmp_col_min;
  x->mv_col_max = tmp_col_max;
  x->mv_row_min = tmp_row_min;
  x->mv_row_max = tmp_row_max;

  // calculate the bit cost on motion vector
  mvp_full.row = tmp_mv->as_mv.row * 8;
  mvp_full.col = tmp_mv->as_mv.col * 8;

  *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
                             x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);

  // Only refine to sub-pel if the rate cost of the mv alone does not
  // already exceed the best rd found so far.
  rate_mode = cpi->inter_mode_cost[mbmi->mode_context[ref]]
                                  [INTER_OFFSET(NEWMV)];
  rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
         best_rd_sofar);

  if (rv) {
    cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
                                 cpi->common.allow_high_precision_mv,
                                 x->errorperbit,
                                 &cpi->fn_ptr[bsize],
                                 cpi->sf.mv.subpel_force_stop,
                                 cpi->sf.mv.subpel_iters_per_step,
                                 cond_cost_list(cpi, cost_list),
                                 x->nmvjointcost, x->mvcost,
                                 &dis, &x->pred_sse[ref], NULL, 0, 0);
  }

  // Undo the pre-plane swap done for a scaled reference.
  if (scaled_ref_frame) {
    int i;
    for (i = 0; i < MAX_MB_PLANE; i++)
      xd->plane[i].pre[0] = backup_yv12[i];
  }
  return rv;
}

201

202
203
// Model-based rate/distortion estimate for the Y plane of a superblock:
// measures source-vs-prediction variance/SSE, chooses a tx size, decides
// whether the block's ac and/or dc coefficients can be skipped, and maps
// the residual energy through vp9_model_rd_from_var_lapndz() to produce
// |out_rate_sum| / |out_dist_sum|. Also reports the raw variance and SSE
// through |var_y| / |sse_y| for later breakout checks.
// Side effects: sets xd->mi[0].src_mi->mbmi.tx_size and x->skip_txfm[0]
// (0 = no skip, 1 = skip dc+ac, 2 = skip ac only).
static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd,
                              int *out_rate_sum, int64_t *out_dist_sum,
                              unsigned int *var_y, unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int64_t dc_thr = p->quant_thred[0] >> 6;
  const int64_t ac_thr = p->quant_thred[1] >> 6;
  const uint32_t dc_quant = pd->dequant[0];
  const uint32_t ac_quant = pd->dequant[1];
  unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
                                           pd->dst.buf, pd->dst.stride, &sse);
  int skip_dc = 0;

  *var_y = var;
  *sse_y = sse;

  if (cpi->common.tx_mode == TX_MODE_SELECT) {
    // Large sse-to-variance ratio suggests a flat residual: use the
    // biggest allowed transform, otherwise stay at 8x8.
    if (sse > (var << 2))
      xd->mi[0].src_mi->mbmi.tx_size =
          MIN(max_txsize_lookup[bsize],
              tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
    else
      xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;

    if (cpi->sf.partition_search_type == VAR_BASED_PARTITION) {
      if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ &&
          cyclic_refresh_segment_id_boosted(xd->mi[0].src_mi->mbmi.segment_id))
        xd->mi[0].src_mi->mbmi.tx_size = TX_8X8;
      else if (xd->mi[0].src_mi->mbmi.tx_size > TX_16X16)
        xd->mi[0].src_mi->mbmi.tx_size = TX_16X16;
    }
  } else {
    xd->mi[0].src_mi->mbmi.tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  }

  // Evaluate if the partition block is a skippable block in Y plane.
  {
    const BLOCK_SIZE unit_size =
        txsize_to_bsize[xd->mi[0].src_mi->mbmi.tx_size];
    const unsigned int num_blk_log2 =
        (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) +
        (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]);
    // Per-transform-block averages of sse and variance.
    const unsigned int sse_tx = sse >> num_blk_log2;
    const unsigned int var_tx = var >> num_blk_log2;

    x->skip_txfm[0] = 0;
    // Check if all ac coefficients can be quantized to zero.
    if (var_tx < ac_thr || var == 0) {
      x->skip_txfm[0] = 2;
      // Check if dc coefficient can be quantized to zero.
      if (sse_tx - var_tx < dc_thr || sse == var)
        x->skip_txfm[0] = 1;
    } else {
      if (sse_tx - var_tx < dc_thr || sse == var)
        skip_dc = 1;
    }
  }

  // Fully skippable: rate 0, distortion = sse scaled to frequency domain.
  if (x->skip_txfm[0] == 1) {
    *out_rate_sum = 0;
    *out_dist_sum = sse << 4;
    return;
  }

  // DC component: model rate/dist from the dc part of the energy
  // (sse - var), using the dc dequant step.
  if (!skip_dc) {
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                   dc_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                   dc_quant >> 3, &rate, &dist);
    }
#else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize],
                                 dc_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  if (!skip_dc) {
    *out_rate_sum = rate >> 1;
    *out_dist_sum = dist << 3;
  } else {
    *out_rate_sum = 0;
    *out_dist_sum = (sse - var) << 4;
  }

  // AC component: model rate/dist from the variance with the ac dequant.
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                                 ac_quant >> (xd->bd - 5), &rate, &dist);
  } else {
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                                 ac_quant >> 3, &rate, &dist);
  }
#else
  vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize],
                               ac_quant >> 3, &rate, &dist);
#endif  // CONFIG_VP9_HIGHBITDEPTH

  *out_rate_sum += rate;
  *out_dist_sum += dist << 4;
}

315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
// Model-based rate/distortion estimate for the chroma (U and V) planes.
// Planes whose color_sensitivity flag is unset are skipped entirely.
// |out_rate_sum| / |out_dist_sum| are reset to 0 here, but |var_y| and
// |sse_y| are only accumulated (+=) — the caller is expected to have
// initialized them (NOTE(review): callers appear to pass the Y-plane
// values; confirm against call sites).
static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE bsize,
                               MACROBLOCK *x, MACROBLOCKD *xd,
                               int *out_rate_sum, int64_t *out_dist_sum,
                               unsigned int *var_y, unsigned int *sse_y) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  unsigned int sse;
  int rate;
  int64_t dist;
  int i;

  *out_rate_sum = 0;
  *out_dist_sum = 0;

  for (i = 1; i <= 2; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];
    const uint32_t dc_quant = pd->dequant[0];
    const uint32_t ac_quant = pd->dequant[1];
    const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
    unsigned int var;

    // Only model planes flagged as color-sensitive.
    if (!x->color_sensitivity[i - 1])
      continue;

    var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
                             pd->dst.buf, pd->dst.stride, &sse);
    *var_y += var;
    *sse_y += sse;

    // DC component: energy (sse - var) with the dc dequant step.
  #if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                   dc_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                   dc_quant >> 3, &rate, &dist);
    }
  #else
    vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs],
                                 dc_quant >> 3, &rate, &dist);
  #endif  // CONFIG_VP9_HIGHBITDEPTH

    *out_rate_sum += rate >> 1;
    *out_dist_sum += dist << 3;

    // AC component: variance with the ac dequant step.
  #if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                   ac_quant >> (xd->bd - 5), &rate, &dist);
    } else {
      vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                   ac_quant >> 3, &rate, &dist);
    }
  #else
    vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs],
                                 ac_quant >> 3, &rate, &dist);
  #endif  // CONFIG_VP9_HIGHBITDEPTH

    *out_rate_sum += rate;
    *out_dist_sum += dist << 4;
  }
}

380
381
382
383
384
385
386
387
388
389
390
391
392
// Claim the first free slot in the |p| array of |len| prediction buffers,
// marking it in use. Returns the slot index, or -1 when all are taken.
static int get_pred_buffer(PRED_BUFFER *p, int len) {
  int idx;

  for (idx = 0; idx < len; ++idx) {
    if (p[idx].in_use)
      continue;
    p[idx].in_use = 1;
    return idx;
  }
  return -1;
}

// Release a prediction buffer previously claimed via get_pred_buffer().
// Passing NULL is a harmless no-op.
static void free_pred_buffer(PRED_BUFFER *p) {
  if (p == NULL)
    return;
  p->in_use = 0;
}

397
398
399
400
401
402
403
404
// Encode-breakout check: if the Y residual (and then U and V residuals)
// are all below dequant-derived ac/dc thresholds, mark the block as a
// skip (x->skip = 1) and overwrite |rate|/|dist| with the cheap skip
// estimates, avoiding a full transform/quantize pass.
// |var_y| / |sse_y| are the Y-plane variance/SSE already computed by the
// caller; UV predictors are only (re)built in the lossless case.
static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 MV_REFERENCE_FRAME ref_frame,
                                 PREDICTION_MODE this_mode,
                                 unsigned int var_y, unsigned int sse_y,
                                 struct buf_2d yv12_mb[][MAX_MB_PLANE],
                                 int *rate, int64_t *dist) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;

  const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
  unsigned int var = var_y, sse = sse_y;
  // Skipping threshold for ac.
  unsigned int thresh_ac;
  // Skipping threshold for dc.
  unsigned int thresh_dc;
  if (x->encode_breakout > 0) {
    // Set a maximum for threshold to avoid big PSNR loss in low bit rate
    // case. Use extreme low threshold for static frames to limit
    // skipping.
    const unsigned int max_thresh = 36000;
    // The encode_breakout input
    const unsigned int min_thresh =
        MIN(((unsigned int)x->encode_breakout << 4), max_thresh);
#if CONFIG_VP9_HIGHBITDEPTH
    // Scale-down shift for thresholds when encoding at bit depth > 8.
    const int shift = (xd->bd << 1) - 16;
#endif

    // Calculate threshold according to dequant value.
    thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) >> 3;
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_ac = ROUND_POWER_OF_TWO(thresh_ac, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
    thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);

    // Adjust ac threshold according to partition size.
    thresh_ac >>=
        8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]);

    thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
#if CONFIG_VP9_HIGHBITDEPTH
    if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && shift > 0) {
      thresh_dc = ROUND_POWER_OF_TWO(thresh_dc, shift);
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  } else {
    // Breakout disabled: thresholds of zero mean only an exact match
    // (var == 0 and sse == var) can pass the checks below.
    thresh_ac = 0;
    thresh_dc = 0;
  }

  // Y skipping condition checking for ac and dc.
  if (var <= thresh_ac && (sse - var) <= thresh_dc) {
    unsigned int sse_u, sse_v;
    unsigned int var_u, var_v;

    // Skip UV prediction unless breakout is zero (lossless) to save
    // computation with low impact on the result
    if (x->encode_breakout == 0) {
      xd->plane[1].pre[0] = yv12_mb[ref_frame][1];
      xd->plane[2].pre[0] = yv12_mb[ref_frame][2];
      vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
    }

    var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
                                    x->plane[1].src.stride,
                                    xd->plane[1].dst.buf,
                                    xd->plane[1].dst.stride, &sse_u);

    // U skipping condition checking
    if (((var_u << 2) <= thresh_ac) && (sse_u - var_u <= thresh_dc)) {
      var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
                                      x->plane[2].src.stride,
                                      xd->plane[2].dst.buf,
                                      xd->plane[2].dst.stride, &sse_v);

      // V skipping condition checking
      if (((var_v << 2) <= thresh_ac) && (sse_v - var_v <= thresh_dc)) {
        x->skip = 1;

        // The cost of skip bit needs to be added.
        *rate = cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
                                    [INTER_OFFSET(this_mode)];

        // More on this part of rate
        // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);

        // Scaling factor for SSE from spatial domain to frequency
        // domain is 16. Adjust distortion accordingly.
        // TODO(yunqingwang): In this function, only y-plane dist is
        // calculated.
        *dist = (sse << 4);  // + ((sse_u + sse_v) << 4);

        // *disable_skip = 1;
      }
    }
  }
}

497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
// Callback context for estimate_block_intra(): carries the encoder/block
// state in, and accumulates the modeled rate and distortion out across
// all transform blocks of the plane.
struct estimate_block_intra_args {
  VP9_COMP *cpi;         // encoder instance
  MACROBLOCK *x;         // current macroblock
  PREDICTION_MODE mode;  // intra mode being evaluated
  int rate;              // accumulated modeled rate
  int64_t dist;          // accumulated modeled distortion
};

// Per-transform-block callback (for vp9_foreach_transformed_block_in_plane)
// that builds the intra predictor for one tx block and adds its modeled
// rate/distortion into |arg| (a struct estimate_block_intra_args).
// Only operates on the Y plane (plane == 0 is asserted).
static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                                 TX_SIZE tx_size, void *arg) {
  struct estimate_block_intra_args* const args = arg;
  VP9_COMP *const cpi = args->cpi;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const BLOCK_SIZE bsize_tx = txsize_to_bsize[tx_size];
  uint8_t *const src_buf_base = p->src.buf;
  uint8_t *const dst_buf_base = pd->dst.buf;
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  int i, j;
  int rate;
  int64_t dist;
  unsigned int var_y, sse_y;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j);
  assert(plane == 0);
  (void) plane;

  // Temporarily advance src/dst pointers to this tx block's 4x4-aligned
  // position; restored below before returning.
  p->src.buf = &src_buf_base[4 * (j * src_stride + i)];
  pd->dst.buf = &dst_buf_base[4 * (j * dst_stride + i)];
  // Use source buffer as an approximation for the fully reconstructed buffer.
  vp9_predict_intra_block(xd, block >> (2 * tx_size),
                          b_width_log2_lookup[plane_bsize],
                          tx_size, args->mode,
                          x->skip_encode ? p->src.buf : pd->dst.buf,
                          x->skip_encode ? src_stride : dst_stride,
                          pd->dst.buf, dst_stride,
                          i, j, 0);
  // This procedure assumes zero offset from p->src.buf and pd->dst.buf.
  model_rd_for_sb_y(cpi, bsize_tx, x, xd, &rate, &dist, &var_y, &sse_y);
  p->src.buf = src_buf_base;
  pd->dst.buf = dst_buf_base;
  args->rate += rate;
  args->dist += dist;
}

544
// Per-reference-frame mapping from mode slot to the THR_* threshold index
// used for rd_threshes / thresh_freq_fact lookups. Row 0 is intra (DC, H,
// V, TM); the following rows cover the inter modes (NEARESTMV, NEARMV,
// ZEROMV, NEWMV) for LAST_FRAME and GOLDEN_FRAME respectively.
static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][4] = {
  {THR_DC, THR_H_PRED, THR_V_PRED, THR_TM},
  {THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV},
  {THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG},
};

550
551
552
553
// Subset of intra prediction modes considered in the non-RD path.
static const PREDICTION_MODE intra_mode_list[] = {
  DC_PRED, V_PRED, H_PRED, TM_PRED
};

554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
// Fast intra mode selection for the non-RD path: evaluates a small set of
// intra prediction modes (the loop below covers DC_PRED through H_PRED in
// enum order) using the model-based rate/distortion estimate, and writes
// the winning mode into the mbmi and its cost into |rd_cost|.
void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  RD_COST this_rdc, best_rdc;
  PREDICTION_MODE this_mode;
  struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
  const TX_SIZE intra_tx_size =
      MIN(max_txsize_lookup[bsize],
          tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
  MODE_INFO *const mic = xd->mi[0].src_mi;
  int *bmode_costs;
  // Mode costs are conditioned on the above/left neighbor intra modes.
  const MODE_INFO *above_mi = xd->mi[-xd->mi_stride].src_mi;
  const MODE_INFO *left_mi = xd->left_available ? xd->mi[-1].src_mi : NULL;
  const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
  const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
  bmode_costs = cpi->y_mode_costs[A][L];

  (void) ctx;
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(&this_rdc);

  mbmi->ref_frame[0] = INTRA_FRAME;
  mbmi->mv[0].as_int = INVALID_MV;
  mbmi->uv_mode = DC_PRED;
  vpx_memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  // Change the limit of this loop to add other intra prediction
  // mode tests.
  for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) {
    args.mode = this_mode;
    args.rate = 0;
    args.dist = 0;
    mbmi->tx_size = intra_tx_size;
    // Accumulate modeled rate/dist over all tx blocks of the Y plane.
    vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
                                           estimate_block_intra, &args);
    this_rdc.rate = args.rate;
    this_rdc.dist = args.dist;
    this_rdc.rate += bmode_costs[this_mode];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                             this_rdc.rate, this_rdc.dist);

    if (this_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = this_rdc;
      mbmi->mode = this_mode;
    }
  }

  *rd_cost = best_rdc;
}

605
606
607
// Fixed per-reference-frame rate costs (indexed by MV_REFERENCE_FRAME)
// used when scoring candidate modes in the non-RD path.
static const int ref_frame_cost[MAX_REF_FRAMES] = {
    1235, 229, 530, 615,
};
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625

// A (reference frame, prediction mode) pair; the candidate unit iterated
// by the non-RD inter mode search.
typedef struct {
  MV_REFERENCE_FRAME ref_frame;
  PREDICTION_MODE pred_mode;
} REF_MODE;

// Ordered list of inter (reference, mode) candidates checked by the
// non-RD mode search: all four inter modes for LAST, then for GOLDEN.
#define RT_INTER_MODES 8
static const REF_MODE ref_mode_set[RT_INTER_MODES] = {
    {LAST_FRAME, ZEROMV},
    {LAST_FRAME, NEARESTMV},
    {LAST_FRAME, NEARMV},
    {LAST_FRAME, NEWMV},
    {GOLDEN_FRAME, ZEROMV},
    {GOLDEN_FRAME, NEARESTMV},
    {GOLDEN_FRAME, NEARMV},
    {GOLDEN_FRAME, NEWMV}
};

626
627
// TODO(jingning) placeholder for inter-frame non-RD mode decision.
// this needs various further optimizations. to be continued..
628
void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
629
                         TileDataEnc *tile_data,
630
631
                         int mi_row, int mi_col, RD_COST *rd_cost,
                         BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
632
  VP9_COMMON *const cm = &cpi->common;
633
  TileInfo *const tile_info = &tile_data->tile_info;
634
  MACROBLOCKD *const xd = &x->e_mbd;
hkuang's avatar
hkuang committed
635
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
636
  struct macroblockd_plane *const pd = &xd->plane[0];
637
  PREDICTION_MODE best_mode = ZEROMV;
638
  MV_REFERENCE_FRAME ref_frame, best_ref_frame = LAST_FRAME;
639
  TX_SIZE best_tx_size = TX_SIZES;
640
  INTERP_FILTER best_pred_filter = EIGHTTAP;
641
642
643
644
  int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
645
  RD_COST this_rdc, best_rdc;
646
  uint8_t skip_txfm = 0, best_mode_skip_txfm = 0;
647
648
649
  // var_y and sse_y are saved to be used in skipping checking
  unsigned int var_y = UINT_MAX;
  unsigned int sse_y = UINT_MAX;
650
651
652
  // Reduce the intra cost penalty for small blocks (<=16x16).
  const int reduction_fac =
      (cpi->sf.partition_search_type == VAR_BASED_PARTITION &&
653
       bsize <= BLOCK_16X16) ? 2 : 0;
654
  const int intra_cost_penalty = vp9_get_intra_cost_penalty(
655
      cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth) >> reduction_fac;
656
657
  const int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv,
                                           intra_cost_penalty, 0);
658
  const int *const rd_threshes = cpi->rd.threshes[mbmi->segment_id][bsize];
659
  const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
Yaowu Xu's avatar
Yaowu Xu committed
660
  INTERP_FILTER filter_ref;
661
  const int bsl = mi_width_log2_lookup[bsize];
662
  const int pred_filter_search = cm->interp_filter == SWITCHABLE ?
663
664
      (((mi_row + mi_col) >> bsl) +
       get_chessboard_index(cm->current_video_frame)) & 0x1 : 0;
665
  int const_motion[MAX_REF_FRAMES] = { 0 };
666
667
  const int bh = num_4x4_blocks_high_lookup[bsize] << 2;
  const int bw = num_4x4_blocks_wide_lookup[bsize] << 2;
668
669
  // For speed 6, the result of interp filter is reused later in actual encoding
  // process.
670
671
672
  // tmp[3] points to dst buffer, and the other 3 point to allocated buffers.
  PRED_BUFFER tmp[4];
  DECLARE_ALIGNED_ARRAY(16, uint8_t, pred_buf, 3 * 64 * 64);
673
674
675
#if CONFIG_VP9_HIGHBITDEPTH
  DECLARE_ALIGNED_ARRAY(16, uint16_t, pred_buf_16, 3 * 64 * 64);
#endif
676
677
678
  struct buf_2d orig_dst = pd->dst;
  PRED_BUFFER *best_pred = NULL;
  PRED_BUFFER *this_mode_pred = NULL;
679
  const int pixels_in_block = bh * bw;
680
  int reuse_inter_pred = cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready;
681
  int ref_frame_skip_mask = 0;
682
  int idx;
683

684
  if (reuse_inter_pred) {
685
    int i;
686
    for (i = 0; i < 3; i++) {
687
688
689
690
691
692
693
694
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth)
        tmp[i].data = CONVERT_TO_BYTEPTR(&pred_buf_16[pixels_in_block * i]);
      else
        tmp[i].data = &pred_buf[pixels_in_block * i];
#else
      tmp[i].data = &pred_buf[pixels_in_block * i];
#endif  // CONFIG_VP9_HIGHBITDEPTH
695
696
697
698
699
700
701
702
      tmp[i].stride = bw;
      tmp[i].in_use = 0;
    }
    tmp[3].data = pd->dst.buf;
    tmp[3].stride = pd->dst.stride;
    tmp[3].in_use = 0;
  }

703
704
  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  x->skip = 0;
705

Yaowu Xu's avatar
Yaowu Xu committed
706
707
708
709
710
711
712
  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride].src_mi->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1].src_mi->mbmi.interp_filter;
  else
    filter_ref = cm->interp_filter;

713
  // initialize mode decisions
714
715
  vp9_rd_cost_reset(&best_rdc);
  vp9_rd_cost_reset(rd_cost);
716
717
718
719
  mbmi->sb_type = bsize;
  mbmi->ref_frame[0] = NONE;
  mbmi->ref_frame[1] = NONE;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
720
721
722
                      tx_mode_to_biggest_tx_size[cm->tx_mode]);
  mbmi->interp_filter = cm->interp_filter == SWITCHABLE ?
                        EIGHTTAP : cm->interp_filter;
723

724
725
726
727
#if CONFIG_VP9_TEMPORAL_DENOISING
  vp9_denoiser_reset_frame_stats(ctx);
#endif

728
  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
729
730
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);

731
    x->pred_mv_sad[ref_frame] = INT_MAX;
732
733
734
    frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
    frame_mv[ZEROMV][ref_frame].as_int = 0;

735
    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
736
737
      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
      const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
738

739
740
741
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                           sf, sf);

742
      if (cm->use_prev_frame_mvs)
743
        vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
744
                         candidates, mi_row, mi_col, NULL, NULL);
745
      else
746
747
        const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
                                             xd->mi[0].src_mi,
748
749
750
751
752
753
754
755
756
757
                                             ref_frame, candidates,
                                             mi_row, mi_col);

      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &frame_mv[NEARESTMV][ref_frame],
                            &frame_mv[NEARMV][ref_frame]);

      if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8)
        vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride,
                    ref_frame, bsize);
758
    } else {
759
      ref_frame_skip_mask |= (1 << ref_frame);
760
    }
761
762
  }

763
764
765
  if (cpi->rc.frames_since_golden == 0)
    ref_frame_skip_mask |= (1 << GOLDEN_FRAME);

766
767
768
769
770
771
772
773
774
775
776
  for (idx = 0; idx < RT_INTER_MODES; ++idx) {
    int rate_mv = 0;
    int mode_rd_thresh;
    int mode_index;
    int i;
    PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode;

    ref_frame = ref_mode_set[idx].ref_frame;
    mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)];

    i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME;
777
778
779
780
781
782
783
784
785
786

    if (!(cpi->ref_frame_flags & flag_list[ref_frame]))
      continue;

    if (cpi->ref_frame_flags & flag_list[i])
      if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1))
        ref_frame_skip_mask |= (1 << ref_frame);

    if (ref_frame_skip_mask & (1 << ref_frame))
      continue;
787
788

    // Select prediction reference frames.
789
790
    for (i = 0; i < MAX_MB_PLANE; i++)
      xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
791

792
793
794
    clamp_mv2(&frame_mv[NEARESTMV][ref_frame].as_mv, xd);
    clamp_mv2(&frame_mv[NEARMV][ref_frame].as_mv, xd);

795
    mbmi->ref_frame[0] = ref_frame;
796
    set_ref_ptrs(cm, xd, ref_frame, NONE);
797

798
799
    if (const_motion[ref_frame] && this_mode == NEARMV)
      continue;
800

801
802
    if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode)))
      continue;
803

804
805
806
807
808
    mode_rd_thresh = best_mode_skip_txfm ?
            rd_threshes[mode_index] << 1 : rd_threshes[mode_index];
    if (rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh,
                            rd_thresh_freq_fact[mode_index]))
      continue;
Jim Bankoski's avatar
Jim Bankoski committed
809

810
811
812
    if (this_mode == NEWMV) {
      if (cpi->sf.partition_search_type != VAR_BASED_PARTITION
          && best_rdc.rdcost < (int64_t) (1 << num_pels_log2_lookup[bsize]))
813
        continue;
814
815
816
      if (ref_frame > LAST_FRAME) {
        int tmp_sad;
        int dis, cost_list[5];
817

818
        if (bsize < BLOCK_16X16)
819
          continue;
820

821
822
        tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize);
        if (tmp_sad > x->pred_mv_sad[LAST_FRAME])
Jim Bankoski's avatar
Jim Bankoski committed
823
824
          continue;

825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
        frame_mv[NEWMV][ref_frame].as_int = mbmi->mv[0].as_int;
        rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
          &mbmi->ref_mvs[ref_frame][0].as_mv,
          x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
        frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
        frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;

        cpi->find_fractional_mv_step(x, &frame_mv[NEWMV][ref_frame].as_mv,
          &mbmi->ref_mvs[ref_frame][0].as_mv,
          cpi->common.allow_high_precision_mv,
          x->errorperbit,
          &cpi->fn_ptr[bsize],
          cpi->sf.mv.subpel_force_stop,
          cpi->sf.mv.subpel_iters_per_step,
          cond_cost_list(cpi, cost_list),
          x->nmvjointcost, x->mvcost, &dis,
          &x->pred_sse[ref_frame], NULL, 0, 0);
      } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col,
        &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost)) {
844
        continue;
845
846
      }
    }
847

848
849
850
    if (this_mode != NEARESTMV && frame_mv[this_mode][ref_frame].as_int ==
        frame_mv[NEARESTMV][ref_frame].as_int)
      continue;
851

852
853
854
855
856
857
858
859
860
861
862
863
864
    mbmi->mode = this_mode;
    mbmi->mv[0].as_int = frame_mv[this_mode][ref_frame].as_int;

    // Search for the best prediction filter type, when the resulting
    // motion vector is at sub-pixel accuracy level for luma component, i.e.,
    // the last three bits are all zeros.
    if (reuse_inter_pred) {
      if (!this_mode_pred) {
        this_mode_pred = &tmp[3];
      } else {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
        pd->dst.buf = this_mode_pred->data;
        pd->dst.stride = bw;
865
      }
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
    }

    if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && pred_filter_search
        && (ref_frame == LAST_FRAME)
        && (((mbmi->mv[0].as_mv.row | mbmi->mv[0].as_mv.col) & 0x07) != 0)) {
      int pf_rate[3];
      int64_t pf_dist[3];
      unsigned int pf_var[3];
      unsigned int pf_sse[3];
      TX_SIZE pf_tx_size[3];
      int64_t best_cost = INT64_MAX;
      INTERP_FILTER best_filter = SWITCHABLE, filter;
      PRED_BUFFER *current_pred = this_mode_pred;

      for (filter = EIGHTTAP; filter <= EIGHTTAP_SHARP; ++filter) {
        int64_t cost;
        mbmi->interp_filter = filter;
        vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
        model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter], &pf_dist[filter],
                          &pf_var[filter], &pf_sse[filter]);
        pf_rate[filter] += vp9_get_switchable_rate(cpi, xd);
        cost = RDCOST(x->rdmult, x->rddiv, pf_rate[filter], pf_dist[filter]);
        pf_tx_size[filter] = mbmi->tx_size;
        if (cost < best_cost) {
          best_filter = filter;
          best_cost = cost;
          skip_txfm = x->skip_txfm[0];

          if (reuse_inter_pred) {
            if (this_mode_pred != current_pred) {
              free_pred_buffer(this_mode_pred);
              this_mode_pred = current_pred;
            }
899

900
901
902
903
            if (filter < EIGHTTAP_SHARP) {
              current_pred = &tmp[get_pred_buffer(tmp, 3)];
              pd->dst.buf = current_pred->data;
              pd->dst.stride = bw;
904
            }
905
          }
906
        }
907
      }
908

909
910
      if (reuse_inter_pred && this_mode_pred != current_pred)
        free_pred_buffer(current_pred);
911

912
      mbmi->interp_filter = best_filter;
913
914
915
916
917
      mbmi->tx_size = pf_tx_size[best_filter];
      this_rdc.rate = pf_rate[best_filter];
      this_rdc.dist = pf_dist[best_filter];
      var_y = pf_var[best_filter];
      sse_y = pf_sse[best_filter];
918
919
920
921
922
923
924
925
926
927
      x->skip_txfm[0] = skip_txfm;
    } else {
      mbmi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref;
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
      model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
                        &var_y, &sse_y);
      this_rdc.rate +=
          cm->interp_filter == SWITCHABLE ?
              vp9_get_switchable_rate(cpi, xd) : 0;
    }
928

929
930
931
932
933
934
935
936
937
938
939
940
    // chroma component rate-distortion cost modeling
    if (x->color_sensitivity[0] || x->color_sensitivity[1]) {
      int uv_rate = 0;
      int64_t uv_dist = 0;
      if (x->color_sensitivity[0])
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1);
      if (x->color_sensitivity[1])
        vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2);
      model_rd_for_sb_uv(cpi, bsize, x, xd, &uv_rate, &uv_dist, &var_y, &sse_y);
      this_rdc.rate += uv_rate;
      this_rdc.dist += uv_dist;
    }
941

942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
    this_rdc.rate += rate_mv;
    this_rdc.rate +=
        cpi->inter_mode_cost[mbmi->mode_context[ref_frame]][INTER_OFFSET(
            this_mode)];
    this_rdc.rate += ref_frame_cost[ref_frame];
    this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist);

    // Skipping checking: test to see if this block can be reconstructed by
    // prediction only.
    if (cpi->allow_encode_breakout) {
      encode_breakout_test(cpi, x, bsize, mi_row, mi_col, ref_frame, this_mode,
                           var_y, sse_y, yv12_mb, &this_rdc.rate,
                           &this_rdc.dist);
      if (x->skip) {
        this_rdc.rate += rate_mv;
        this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate,
                                 this_rdc.dist);
959
      }
960
    }
961

962
#if CONFIG_VP9_TEMPORAL_DENOISING
963
964
    if (cpi->oxcf.noise_sensitivity > 0)
      vp9_denoiser_update_frame_stats(mbmi, sse_y, this_mode, ctx);
965
#else
966
    (void)ctx;
Tim Kopp's avatar
Tim Kopp committed
967
968
#endif

969
970
971
972
973
974
975
    if (this_rdc.rdcost < best_rdc.rdcost || x->skip) {
      best_rdc = this_rdc;
      best_mode = this_mode;
      best_pred_filter = mbmi->interp_filter;
      best_tx_size = mbmi->tx_size;
      best_ref_frame = ref_frame;
      best_mode_skip_txfm = x->skip_txfm[0];
976

977
978
979
980
981
982
983
      if (reuse_inter_pred) {
        free_pred_buffer(best_pred);
        best_pred = this_mode_pred;
      }
    } else {
      if (reuse_inter_pred)
        free_pred_buffer(this_mode_pred);
984
    }
985

986
987
    if (x->skip)
      break;
988
989
  }

990
  mbmi->mode          = best_mode;
991
  mbmi->interp_filter = best_pred_filter;
992
993
994
  mbmi->tx_size       = best_tx_size;
  mbmi->ref_frame[0]  = best_ref_frame;
  mbmi->mv[0].as_int  = frame_mv[best_mode][best_ref_frame].as_int;
hkuang's avatar
hkuang committed
995
  xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
996
  x->skip_txfm[0] = best_mode_skip_txfm;
997

998
999
  // Perform intra prediction search, if the best SAD is above a certain
  // threshold.
1000
1001
1002
  if (best_rdc.rdcost == INT64_MAX ||
      (!x->skip && best_rdc.rdcost > inter_mode_thresh &&
       bsize <= cpi->sf.max_intra_bsize)) {
1003
1004
1005
1006
    struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
    const TX_SIZE intra_tx_size =
        MIN(max_txsize_lookup[bsize],
            tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
1007
    int i;
1008
    TX_SIZE best_intra_tx_size = TX_SIZES;
1009

1010
1011
1012
    if (reuse_inter_pred && best_pred != NULL) {
      if (best_pred->data == orig_dst.buf) {
        this_mode_pred = &tmp[get_pred_buffer(tmp, 3)];
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
#if CONFIG_VP9_HIGHBITDEPTH
        if (cm->use_highbitdepth)
          vp9_highbd_convolve_copy(best_pred->data, best_pred->stride,
                                   this_mode_pred->data, this_mode_pred->stride,
                                   NULL, 0, NULL, 0, bw, bh, xd->bd);
        else
          vp9_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride,
                          NULL, 0, NULL, 0, bw, bh);
#else
1023
1024
1025
        vp9_convolve_copy(best_pred->data, best_pred->stride,
                          this_mode_pred->data, this_mode_pred->stride,
                          NULL, 0, NULL, 0, bw, bh);
1026
#endif  // CONFIG_VP9_HIGHBITDEPTH
1027
1028
        best_pred = this_mode_pred;
      }
1029
    }
1030
    pd->dst = orig_dst;
1031

1032
1033
1034
1035
    for (i = 0; i < 4; ++i) {
      const PREDICTION_MODE this_mode = intra_mode_list[i];
      if (!((1 << this_mode) & cpi->sf.intra_y_mode_mask[intra_tx_size]))
        continue;
1036
1037
1038
1039
1040
1041
      args.mode = this_mode;
      args.rate = 0;
      args.dist = 0;
      mbmi->tx_size = intra_tx_size;
      vp9_foreach_transformed_block_in_plane(xd, bsize, 0,
                                             estimate_block_intra, &args);
1042
1043
1044
      this_rdc.rate = args.rate;
      this_rdc.dist = args.dist;
      this_rdc.rate += cpi->mbmode_cost[this_mode];
1045
      this_rdc.rate += ref_frame_cost[INTRA_FRAME];
1046
1047
1048
1049
      this_rdc.rate += intra_cost_penalty;
      this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                               this_rdc.rate, this_rdc.dist);

1050
      if (this_rdc.rdcost < best_rdc.rdcost) {
1051
        best_rdc = this_rdc;
1052
        mbmi->mode = this_mode;
1053
        best_intra_tx_size = mbmi->tx_size;
1054
1055
        mbmi->ref_frame[0] = INTRA_FRAME;
        mbmi->uv_mode = this_mode;
1056
        mbmi->mv[0].as_int = INVALID_MV;
1057
1058
      }
    }
1059
1060
1061
1062
1063

    // Reset mb_mode_info to the best inter mode.
    if (mbmi->ref_frame[0] != INTRA_FRAME) {
      x->skip_txfm[0] = best_mode_skip_txfm;
      mbmi->tx_size = best_tx_size;
1064
1065
    } else {
      mbmi->tx_size = best_intra_tx_size;
1066
    }
1067
1068
1069
  }

  pd->dst = orig_dst;
1070
1071
1072

  if (reuse_inter_pred && best_pred != NULL) {
    if (best_pred->data != orig_dst.buf && is_inter_mode(mbmi->mode)) {
1073
#if CONFIG_VP9_HIGHBITDEPTH
1074
1075
1076
1077
1078
1079
1080
1081
1082
      if (cm->use_highbitdepth)
        vp9_highbd_convolve_copy(best_pred->data, best_pred->stride,
                                 pd->dst.buf, pd->dst.stride, NULL, 0,
                                 NULL, 0, bw, bh, xd->bd);
      else
        vp9_convolve_copy(best_pred->data, best_pred->stride,
                          pd->dst.buf, pd->dst.stride, NULL, 0,
                          NULL, 0, bw, bh);
#else
1083
1084
1085
      vp9_convolve_copy(best_pred->data, best_pred->stride,
                        pd->dst.buf, pd->dst.stride, NULL, 0,
                        NULL, 0, bw, bh);
1086
#endif  // CONFIG_VP9_HIGHBITDEPTH
1087
    }
1088
  }
1089

1090
1091
1092
1093
1094
1095
  if (cpi->sf.adaptive_rd_thresh) {
    THR_MODES best_mode_idx = is_inter_block(mbmi) ?
        mode_idx[best_ref_frame][INTER_OFFSET(mbmi->mode)] :
        mode_idx[INTRA_FRAME][mbmi->mode];
    PREDICTION_MODE this_mode;
    for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
1096
      if (best_ref_frame != ref_frame) continue;
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
      for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
        THR_MODES thr_mode_idx = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
        int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
        if (thr_mode_idx == best_mode_idx)
          *freq_fact -= (*freq_fact >> 4);
        else
          *freq_fact = MIN(*freq_fact + RD_THRESH_INC,
                           cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
      }
    }
  }
1108

1109
  *rd_cost = best_rdc;
1110
}
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137

void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
                                TileDataEnc *tile_data,
                                int mi_row, int mi_col, RD_COST *rd_cost,
                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  SPEED_FEATURES *const sf = &cpi->sf;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
  const struct segmentation *const seg = &cm->seg;
  MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
  MV_REFERENCE_FRAME best_ref_frame = NONE;
  unsigned char segment_id = mbmi->segment_id;
  struct buf_2d yv12_mb[4][MAX_MB_PLANE];
  static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
                                    VP9_ALT_FLAG };
  int64_t best_rd = INT64_MAX;
  b_mode_info bsi[MAX_REF_FRAMES][4];
  int ref_frame_skip_mask = 0;
  const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;

  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
  ctx->pred_pixel_ready = 0;

1138
  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
1139
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
1140
1141
1142
    int_mv dummy_mv[2];
    x->pred_mv_sad[ref_frame] = INT_MAX;

1143
    if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
1144
1145
      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
      const struct scale_factors *const sf =
1146
                             &cm->frame_refs[ref_frame - 1].sf;
1147
1148
1149
      vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                           sf, sf);
      vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
1150
                       candidates, mi_row, mi_col, NULL, NULL);
1151

1152
1153
1154
1155
1156
      vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                            &dummy_mv[0], &dummy_mv[1]);
    } else {
      ref_frame_skip_mask |= (1 << ref_frame);
    }
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
  }

  mbmi->sb_type = bsize;
  mbmi->tx_size = TX_4X4;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
                                                        : cm->interp_filter;

  for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
    int64_t this_rd = 0;
    int plane;

    if (ref_frame_skip_mask & (1 << ref_frame))
      continue;

    // TODO(jingning, agrange): Scaling reference frame not supported for
    // sub8x8 blocks. Is this supported now?
    if (ref_frame > INTRA_FRAME &&
        vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
      continue;

    // If the segment reference frame feature is enabled....
    // then do nothing if the current ref frame is not allowed..
    if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
        vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
      continue;

    mbmi->ref_frame[0] = ref_frame;
    x->skip = 0;
    set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);

    // Select prediction reference frames.
    for (plane = 0; plane < MAX_MB_PLANE; plane++)
      xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];

    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
1196
        int_mv b_mv[MB_MODE_COUNT];