vp9_reconinter.c 19.7 KB
Newer Older
John Koleszar's avatar
John Koleszar committed
1
/*
2
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
John Koleszar's avatar
John Koleszar committed
3
 *
4
 *  Use of this source code is governed by a BSD-style license
5
6
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
7
 *  in the file PATENTS.  All contributing project authors may
8
 *  be found in the AUTHORS file in the root of the source tree.
John Koleszar's avatar
John Koleszar committed
9
10
 */

11
#include <assert.h>
#include <string.h>  /* memcpy, memset */

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_filter.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
John Koleszar's avatar
John Koleszar committed
19

20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
// Map a horizontal value from the current frame into the reference frame
// using the x ratio x_num/x_den (truncating integer division).
static int scale_value_x_with_scaling(int val,
                                      const struct scale_factors *scale) {
  const int scaled = val * scale->x_num;
  return scaled / scale->x_den;
}

// Map a vertical value from the current frame into the reference frame
// using the y ratio y_num/y_den (truncating integer division).
static int scale_value_y_with_scaling(int val,
                                      const struct scale_factors *scale) {
  const int scaled = val * scale->y_num;
  return scaled / scale->y_den;
}

// Identity mapping used when the reference and current frame sizes match;
// the scale argument is intentionally ignored.
static int unscaled_value(int val, const struct scale_factors *scale) {
  (void) scale;  // unused in the 1:1 case
  return val;
}

// Convert a q3 motion vector to q4 precision, apply the frame scale ratio,
// then add the per-block subpel offset: result = (mv << 1) * num / den + off.
static int_mv32 mv_q3_to_q4_with_scaling(const int_mv *src_mv,
                                         const struct scale_factors *scale) {
  int_mv32 res;
  const int32_t row_q4 = (int32_t)src_mv->as_mv.row << 1;
  const int32_t col_q4 = (int32_t)src_mv->as_mv.col << 1;

  /* TODO(jkoleszar): make fixed point, or as a second multiply? */
  res.as_mv.row = row_q4 * scale->y_num / scale->y_den + scale->y_offset_q4;
  res.as_mv.col = col_q4 * scale->x_num / scale->x_den + scale->x_offset_q4;
  return res;
}

// Convert a q3 motion vector to q4 precision with no scaling and no offset.
static int_mv32 mv_q3_to_q4_without_scaling(const int_mv *src_mv,
                                            const struct scale_factors *scale) {
  int_mv32 result;
  // Silence -Wunused-parameter; kept for signature parity with the
  // scaling variant (both are assigned to scale->scale_mv_q3_to_q4).
  (void) scale;

  result.as_mv.row = src_mv->as_mv.row << 1;
  result.as_mv.col = src_mv->as_mv.col << 1;
  return result;
}

// Scale a single q4 MV component by num/den and add the subpel offset.
static int32_t mv_component_q4_with_scaling(int mv_q4, int num, int den,
                                            int offset_q4) {
  /* TODO(jkoleszar): make fixed point, or as a second multiply? */
  const int32_t scaled = (int32_t)(mv_q4 * num) / den;
  return scaled + offset_q4;
}

// Identity variant for the 1:1 case: the q4 MV component passes through
// unchanged; the ratio and offset arguments are intentionally ignored.
static int32_t mv_component_q4_without_scaling(int mv_q4, int num, int den,
                                               int offset_q4) {
  (void)num;
  (void)den;
  (void)offset_q4;
  return (int32_t)mv_q4;
}

// Compute the q4 subpel phase that the block origin (row, col measured in
// 4-pel units, hence * 16 to get q4) lands on after scaling, keeping only
// the fractional (low 4) bits.
static void set_offsets_with_scaling(struct scale_factors *scale,
                                     int row, int col) {
  const int col_q4 = 16 * col;
  const int row_q4 = 16 * row;

  scale->x_offset_q4 = (col_q4 * scale->x_num / scale->x_den) & 0xf;
  scale->y_offset_q4 = (row_q4 * scale->y_num / scale->y_den) & 0xf;
}

// With no scaling the block origin always lands on a full pel, so the q4
// subpel offsets are identically zero regardless of position.
static void set_offsets_without_scaling(struct scale_factors *scale,
                                        int row, int col) {
  // Silence -Wunused-parameter; signature must match the scaling variant
  // (both are assigned to scale->set_scaled_offsets).
  (void) row;
  (void) col;
  scale->x_offset_q4 = 0;
  scale->y_offset_q4 = 0;
}

92
void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
93
                                       int other_w, int other_h,
94
95
96
97
98
                                       int this_w, int this_h) {
  scale->x_num = other_w;
  scale->x_den = this_w;
  scale->x_offset_q4 = 0;  // calculated per-mb
  scale->x_step_q4 = 16 * other_w / this_w;
99

100
101
102
103
104
  scale->y_num = other_h;
  scale->y_den = this_h;
  scale->y_offset_q4 = 0;  // calculated per-mb
  scale->y_step_q4 = 16 * other_h / this_h;

105
106
107
108
  if (scale->x_num == scale->x_den && scale->y_num == scale->y_den) {
    scale->scale_value_x = unscaled_value;
    scale->scale_value_y = unscaled_value;
    scale->set_scaled_offsets = set_offsets_without_scaling;
109
110
    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
    scale->scale_mv_component_q4 = mv_component_q4_without_scaling;
111
112
113
114
  } else {
    scale->scale_value_x = scale_value_x_with_scaling;
    scale->scale_value_y = scale_value_y_with_scaling;
    scale->set_scaled_offsets = set_offsets_with_scaling;
115
116
    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
    scale->scale_mv_component_q4 = mv_component_q4_with_scaling;
117
118
  }

119
120
121
122
123
124
  // TODO(agrange): Investigate the best choice of functions to use here
  // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
  // to do at full-pel offsets. The current selection, where the filter is
  // applied in one direction only, and not at all for 0,0, seems to give the
  // best quality, but it may be worth trying an additional mode that does
  // do the filtering on full-pel.
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
  if (scale->x_step_q4 == 16) {
    if (scale->y_step_q4 == 16) {
      // No scaling in either direction.
      scale->predict[0][0][0] = vp9_convolve_copy;
      scale->predict[0][0][1] = vp9_convolve_avg;
      scale->predict[0][1][0] = vp9_convolve8_vert;
      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
      scale->predict[1][0][0] = vp9_convolve8_horiz;
      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
    } else {
      // No scaling in x direction. Must always scale in the y direction.
      scale->predict[0][0][0] = vp9_convolve8_vert;
      scale->predict[0][0][1] = vp9_convolve8_avg_vert;
      scale->predict[0][1][0] = vp9_convolve8_vert;
      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
      scale->predict[1][0][0] = vp9_convolve8;
      scale->predict[1][0][1] = vp9_convolve8_avg;
    }
  } else {
    if (scale->y_step_q4 == 16) {
      // No scaling in the y direction. Must always scale in the x direction.
      scale->predict[0][0][0] = vp9_convolve8_horiz;
      scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
      scale->predict[0][1][0] = vp9_convolve8;
      scale->predict[0][1][1] = vp9_convolve8_avg;
      scale->predict[1][0][0] = vp9_convolve8_horiz;
      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
    } else {
      // Must always scale in both directions.
      scale->predict[0][0][0] = vp9_convolve8;
      scale->predict[0][0][1] = vp9_convolve8_avg;
      scale->predict[0][1][0] = vp9_convolve8;
      scale->predict[0][1][1] = vp9_convolve8_avg;
      scale->predict[1][0][0] = vp9_convolve8;
      scale->predict[1][0][1] = vp9_convolve8_avg;
    }
  }
  // 2D subpel motion always gets filtered in both directions
  scale->predict[1][1][0] = vp9_convolve8;
  scale->predict[1][1][1] = vp9_convolve8_avg;
}

// Install the subpel interpolation filter tables selected by
// mcomp_filter_type into xd->subpix, and refresh the per-reference scale
// factors when a mode_info context is present.
void vp9_setup_interp_filters(MACROBLOCKD *xd,
                              INTERPOLATIONFILTERTYPE mcomp_filter_type,
                              VP9_COMMON *cm) {
  if (xd->mode_info_context) {
    MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;

    set_scale_factors(xd,
                      mbmi->ref_frame[0] - 1,
                      mbmi->ref_frame[1] - 1,
                      cm->active_ref_scale);
  }

  switch (mcomp_filter_type) {
    case EIGHTTAP:
    case SWITCHABLE:
      xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8;
      break;
    case EIGHTTAP_SMOOTH:
      xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8lp;
      break;
    case EIGHTTAP_SHARP:
      xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8s;
      break;
    case BILINEAR:
      xd->subpix.filter_x = xd->subpix.filter_y = vp9_bilinear_filters;
      break;
    default:
      // Previously there was no default, so an out-of-range value left the
      // filter pointers stale. Fall back to the regular 8-tap filters.
      xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8;
      break;
  }
  // Filter tables must be 256-byte aligned for the SIMD convolve kernels.
  assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
}

197
// Copy a 16x16 block of pixels from src to dst (strides in bytes).
// Replaces the CONFIG_FAST_UNALIGNED path that cast uint8_t* to uint32_t*
// — a strict-aliasing and alignment violation (undefined behavior) — with a
// per-row memcpy, which is well-defined for any alignment and is compiled
// to equivalent code on modern compilers.
void vp9_copy_mem16x16_c(const uint8_t *src,
                         int src_stride,
                         uint8_t *dst,
                         int dst_stride) {
  int r;

  for (r = 0; r < 16; r++) {
    memcpy(dst, src, 16);
    src += src_stride;
    dst += dst_stride;
  }
}

234
// Copy an 8x8 block of pixels from src to dst (strides in bytes).
// Replaces the CONFIG_FAST_UNALIGNED uint32_t* casts (strict-aliasing /
// alignment UB) with a per-row memcpy — well-defined and equally fast.
void vp9_copy_mem8x8_c(const uint8_t *src,
                       int src_stride,
                       uint8_t *dst,
                       int dst_stride) {
  int r;

  for (r = 0; r < 8; r++) {
    memcpy(dst, src, 8);
    src += src_stride;
    dst += dst_stride;
  }
}

259
// Copy an 8-wide, 4-high block of pixels from src to dst (strides in bytes).
// Replaces the CONFIG_FAST_UNALIGNED uint32_t* casts (strict-aliasing /
// alignment UB) with a per-row memcpy — well-defined and equally fast.
void vp9_copy_mem8x4_c(const uint8_t *src,
                       int src_stride,
                       uint8_t *dst,
                       int dst_stride) {
  int r;

  for (r = 0; r < 4; r++) {
    memcpy(dst, src, 8);
    src += src_stride;
    dst += dst_stride;
  }
}

284
285
286
287
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                               uint8_t *dst, int dst_stride,
                               const int_mv *mv_q3,
                               const struct scale_factors *scale,
288
                               int w, int h, int weight,
289
                               const struct subpix_fn_table *subpix) {
290
  int_mv32 mv = scale->scale_mv_q3_to_q4(mv_q3, scale);
291
  src += (mv.as_mv.row >> 4) * src_stride + (mv.as_mv.col >> 4);
292
  scale->predict[!!(mv.as_mv.col & 15)][!!(mv.as_mv.row & 15)][weight](
293
      src, src_stride, dst, dst_stride,
294
295
      subpix->filter_x[mv.as_mv.col & 15], scale->x_step_q4,
      subpix->filter_y[mv.as_mv.row & 15], scale->y_step_q4,
296
      w, h);
John Koleszar's avatar
John Koleszar committed
297
298
}

299
300
void vp9_build_inter_predictor_q4(const uint8_t *src, int src_stride,
                                  uint8_t *dst, int dst_stride,
301
                                  const int_mv *mv_q4,
302
                                  const struct scale_factors *scale,
303
                                  int w, int h, int weight,
304
                                  const struct subpix_fn_table *subpix) {
305
306
307
308
309
310
311
312
  const int scaled_mv_row_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.row,
                                                            scale->y_num,
                                                            scale->y_den,
                                                            scale->y_offset_q4);
  const int scaled_mv_col_q4 = scale->scale_mv_component_q4(mv_q4->as_mv.col,
                                                            scale->x_num,
                                                            scale->x_den,
                                                            scale->x_offset_q4);
313
314
315
  const int subpel_x = scaled_mv_col_q4 & 15;
  const int subpel_y = scaled_mv_row_q4 & 15;

316
  src += (scaled_mv_row_q4 >> 4) * src_stride + (scaled_mv_col_q4 >> 4);
317
  scale->predict[!!subpel_x][!!subpel_y][weight](
318
      src, src_stride, dst, dst_stride,
319
320
      subpix->filter_x[subpel_x], scale->x_step_q4,
      subpix->filter_y[subpel_y], scale->y_step_q4,
321
      w, h);
322
323
}

324
325
// Divide a summed q4 MV component by 4 (averaging four values), rounding
// halves away from zero.
static INLINE int round_mv_comp_q4(int value) {
  const int bias = (value < 0) ? -2 : 2;
  return (value + bias) / 4;
}

328
329
330
331
332
// Rounded average of the row components of the four 4x4 sub-block MVs for
// reference list idx — used as the single chroma MV in SPLITMV mode.
static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int idx) {
  int sum = 0;
  int i;
  for (i = 0; i < 4; ++i)
    sum += mb->mode_info_context->bmi[i].as_mv[idx].as_mv.row;
  return round_mv_comp_q4(sum);
}

336
337
338
339
340
// Rounded average of the column components of the four 4x4 sub-block MVs
// for reference list idx — used as the single chroma MV in SPLITMV mode.
static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int idx) {
  int sum = 0;
  int i;
  for (i = 0; i < 4; ++i)
    sum += mb->mode_info_context->bmi[i].as_mv[idx].as_mv.col;
  return round_mv_comp_q4(sum);
}

344
345
// TODO(jkoleszar): yet another mv clamping function :-(
MV clamp_mv_to_umv_border_sb(const MV *src_mv,
346
    int bwl, int bhl, int ss_x, int ss_y,
347
348
349
350
351
352
    int mb_to_left_edge, int mb_to_top_edge,
    int mb_to_right_edge, int mb_to_bottom_edge) {
  /* If the MV points so far into the UMV border that no visible pixels
   * are used for reconstruction, the subpel part of the MV can be
   * discarded and the MV limited to 16 pixels with equivalent results.
   */
353
354
355
356
  const int spel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 4;
  const int spel_right = spel_left - (1 << 4);
  const int spel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 4;
  const int spel_bottom = spel_top - (1 << 4);
357
  MV clamped_mv;
358
359
360
361
362
363
364
365
366

  assert(ss_x <= 1);
  assert(ss_y <= 1);
  clamped_mv.col = clamp(src_mv->col << (1 - ss_x),
                         (mb_to_left_edge << (1 - ss_x)) - spel_left,
                         (mb_to_right_edge << (1 - ss_x)) + spel_right);
  clamped_mv.row = clamp(src_mv->row << (1 - ss_y),
                         (mb_to_top_edge << (1 - ss_y)) - spel_top,
                         (mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
367
368
369
370
371
372
373
  return clamped_mv;
}

// Per-call context handed to build_inter_predictors() through the
// foreach_predicted_block iterators. Field order matters: the
// vp9_build_inter_predictors_sby/sbuv wrappers initialize it positionally.
struct build_inter_predictors_args {
  MACROBLOCKD *xd;                  // block descriptor being predicted
  int x;                            // block origin x in pixels (mi_col * MI_SIZE)
  int y;                            // block origin y in pixels (mi_row * MI_SIZE)
  uint8_t* dst[MAX_MB_PLANE];       // per-plane destination buffers
  int dst_stride[MAX_MB_PLANE];     // per-plane destination strides
  uint8_t* pre[2][MAX_MB_PLANE];    // [ref][plane] prediction source buffers
  int pre_stride[2][MAX_MB_PLANE];  // [ref][plane] prediction source strides
};
// Build the inter prediction for one sub-block of one plane, once per
// active reference frame (one or two, averaged when two).
static void build_inter_predictors(int plane, int block,
                                   BLOCK_SIZE_TYPE bsize,
                                   int pred_w, int pred_h,
                                   void *argv) {
  const struct build_inter_predictors_args* const arg = argv;
  MACROBLOCKD * const xd = arg->xd;
  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
  const int bh = 4 << bhl, bw = 4 << bwl;
  const int x = 4 * (block & ((1 << bwl) - 1)), y = 4 * (block >> bwl);
  const int use_second_ref = xd->mode_info_context->mbmi.ref_frame[1] > 0;
  int which_mv;

  assert(x < bw);
  assert(y < bh);
  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
         4 << pred_w == bw);
  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
         4 << pred_h == bh);

  for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
    // Chroma planes carry their own scale factors.
    struct scale_factors * const scale =
      plane == 0 ? &xd->scale_factor[which_mv] : &xd->scale_factor_uv[which_mv];

    // source
    const uint8_t * const base_pre = arg->pre[which_mv][plane];
    const int pre_stride = arg->pre_stride[which_mv][plane];
    // Fix: offset the source with this plane's scale factors; previously
    // &xd->scale_factor[which_mv] (luma) was used even for chroma planes.
    const uint8_t *const pre = base_pre +
        scaled_buffer_offset(x, y, pre_stride, scale);

    // dest
    uint8_t *const dst = arg->dst[plane] + arg->dst_stride[plane] * y + x;

    // motion vector
    const MV *mv;
    MV split_chroma_mv;
    int_mv clamped_mv;

    if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
      if (plane == 0) {
        mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
      } else {
        // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
        // same MV (the average of the 4 luma MVs) but we could do something
        // smarter for non-4:2:0. Just punt for now, pending the changes to get
        // rid of SPLITMV mode entirely.
        split_chroma_mv.row = mi_mv_pred_row_q4(xd, which_mv);
        split_chroma_mv.col = mi_mv_pred_col_q4(xd, which_mv);
        mv = &split_chroma_mv;
      }
    } else {
      mv = &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
    }

    /* TODO(jkoleszar): This clamping is done in the incorrect place for the
     * scaling case. It needs to be done on the scaled MV, not the pre-scaling
     * MV. Note however that it performs the subsampling aware scaling so
     * that the result is always q4.
     */
    clamped_mv.as_mv = clamp_mv_to_umv_border_sb(mv, bwl, bhl,
                                                 xd->plane[plane].subsampling_x,
                                                 xd->plane[plane].subsampling_y,
                                                 xd->mb_to_left_edge,
                                                 xd->mb_to_top_edge,
                                                 xd->mb_to_right_edge,
                                                 xd->mb_to_bottom_edge);
    scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);

    // Fix: pass 'scale' — the per-plane struct whose subpel offsets were
    // just computed above — instead of the luma scale factors, so chroma
    // prediction under frame scaling reads the offsets that were set.
    vp9_build_inter_predictor_q4(pre, pre_stride,
                                 dst, arg->dst_stride[plane],
                                 &clamped_mv, scale,
                                 4 << pred_w, 4 << pred_h, which_mv,
                                 &xd->subpix);
  }
}
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
454
455
                                    int mi_row,
                                    int mi_col,
456
457
                                    BLOCK_SIZE_TYPE bsize) {
  struct build_inter_predictors_args args = {
458
    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
459
    {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
460
461
462
    {{xd->plane[0].pre[0].buf, NULL, NULL},
     {xd->plane[0].pre[1].buf, NULL, NULL}},
    {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
463
  };
464

465
466
  foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
}
467
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
468
469
                                     int mi_row,
                                     int mi_col,
470
471
                                     BLOCK_SIZE_TYPE bsize) {
  struct build_inter_predictors_args args = {
472
    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
473
474
475
476
477
478
479
480
481
482
483
484
485
486
#if CONFIG_ALPHA
    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
     xd->plane[3].dst.buf},
    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride,
     xd->plane[3].dst.stride},
    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf,
      xd->plane[3].pre[0].buf},
     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf,
      xd->plane[3].pre[1].buf}},
    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride,
      xd->plane[3].pre[0].stride},
     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride,
      xd->plane[3].pre[1].stride}},
#else
487
488
    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
489
490
491
492
    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
493
#endif
494
495
496
  };
  foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
}
497
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
498
                                   int mi_row, int mi_col,
499
                                   BLOCK_SIZE_TYPE bsize) {
500

501
502
  vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
  vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
503
}
504

505
506
/*encoder only*/
// Thin wrapper: build 16x16 chroma predictions via the generic SB path.
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
                                        int mb_row, int mb_col) {
  vp9_build_inter_predictors_sbuv(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
}
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525

// TODO(dkovalev: find better place for this function)
// Initialize the scale factors for active reference i, mapping from that
// reference buffer's crop dimensions to the current frame's dimensions.
void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
  const int ref = cm->active_ref_idx[i];
  struct scale_factors *const sf = &cm->active_ref_scale[i];
  if (ref >= NUM_YV12_BUFFERS) {
    // Invalid/unset reference index: zero the whole struct (including the
    // function pointers) rather than leaving stale factors behind.
    memset(sf, 0, sizeof(*sf));
  } else {
    YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
    vp9_setup_scale_factors_for_frame(sf,
                                      fb->y_crop_width, fb->y_crop_height,
                                      cm->width, cm->height);
  }
}