vp9_temporal_filter.c 17.7 KB
Newer Older
Johann's avatar
Johann committed
1
2
3
4
5
6
7
8
9
10
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

Dmitry Kovalev's avatar
Dmitry Kovalev committed
11
12
#include <math.h>
#include <limits.h>
Johann's avatar
Johann committed
13

14
#include "vp9/common/vp9_alloccommon.h"
15
#include "vp9/common/vp9_onyxc_int.h"
16
#include "vp9/common/vp9_quant_common.h"
17
#include "vp9/common/vp9_reconinter.h"
18
#include "vp9/common/vp9_systemdependent.h"
19
#include "vp9/encoder/vp9_extend.h"
20
#include "vp9/encoder/vp9_firstpass.h"
21
#include "vp9/encoder/vp9_mcomp.h"
Dmitry Kovalev's avatar
Dmitry Kovalev committed
22
#include "vp9/encoder/vp9_encoder.h"
23
#include "vp9/encoder/vp9_quantize.h"
24
25
#include "vp9/encoder/vp9_ratectrl.h"
#include "vp9/encoder/vp9_segmentation.h"
Johann's avatar
Johann committed
26
27
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/vpx_timer.h"
28
#include "vpx_scale/vpx_scale.h"
Johann's avatar
Johann committed
29

30
31
static int fixed_divide[512];

32
33
34
35
36
// Build motion-compensated 16x16 Y and corresponding U/V predictors for one
// macroblock into |pred| (Y at pred[0], U at pred[256], V at pred[512]).
// |mv_row|/|mv_col| are in 1/8-pel units; |x|,|y| locate the block in the
// frame for the scaled prediction path.
static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
                                            uint8_t *y_mb_ptr,
                                            uint8_t *u_mb_ptr,
                                            uint8_t *v_mb_ptr,
                                            int stride,
                                            int uv_block_width,
                                            int uv_block_height,
                                            int mv_row,
                                            int mv_col,
                                            uint8_t *pred,
                                            struct scale_factors *scale,
                                            int x, int y) {
  const int ref = 0;  // single reference; no compound prediction here
  const MV mv = { mv_row, mv_col };
  const InterpKernel *const kernel =
      vp9_get_interp_kernel(xd->mi[0]->mbmi.interp_filter);
  enum mv_precision uv_mv_precision;
  int uv_stride;

  // An 8-wide chroma block implies 4:2:0 subsampling: halve the stride
  // (rounding up) and use Q4 mv precision so the Y mv maps onto chroma.
  if (uv_block_width == 8) {
    uv_stride = (stride + 1) >> 1;
    uv_mv_precision = MV_PRECISION_Q4;
  } else {
    uv_stride = stride;
    uv_mv_precision = MV_PRECISION_Q3;
  }

  // Luma predictor: fixed 16x16 block.
  vp9_build_inter_predictor(y_mb_ptr, stride,
                            &pred[0], 16,
                            &mv,
                            scale,
                            16, 16,
                            ref,
                            kernel, MV_PRECISION_Q3, x, y);

  // Chroma U predictor.
  vp9_build_inter_predictor(u_mb_ptr, uv_stride,
                            &pred[256], uv_block_width,
                            &mv,
                            scale,
                            uv_block_width, uv_block_height,
                            ref,
                            kernel, uv_mv_precision, x, y);

  // Chroma V predictor.
  vp9_build_inter_predictor(v_mb_ptr, uv_stride,
                            &pred[512], uv_block_width,
                            &mv,
                            scale,
                            uv_block_width, uv_block_height,
                            ref,
                            kernel, uv_mv_precision, x, y);
}
83

84
85
86
87
88
89
90
91
// Populate the Q19 reciprocal table used by the temporal filter so the
// per-pixel normalization can use a multiply + shift instead of a divide:
//   x / d  ==  (x * fixed_divide[d]) >> 19   for 1 <= d < 512.
//
// Fix: declare the parameter list as (void). In C, empty parentheses mean
// "unspecified parameters", not "no parameters", so callers passing
// arguments would not be diagnosed.
void vp9_temporal_filter_init(void) {
  int i;

  fixed_divide[0] = 0;  // d == 0 is never used as a divisor; keep it defined
  for (i = 1; i < 512; ++i)
    fixed_divide[i] = 0x80000 / i;  // 0x80000 == 1 << 19
}

92
93
94
// Reference (C) implementation of the temporal filter accumulation.
// For each pixel, compare the original frame (|frame1|, strided) against the
// motion-compensated predictor (|frame2|, packed block_width x block_height),
// derive a 0..16 similarity weight scaled by |filter_weight|, then add the
// weighted predictor pixel into |accumulator| and the weight into |count|.
void vp9_temporal_filter_apply_c(uint8_t *frame1,
                                 unsigned int stride,
                                 uint8_t *frame2,
                                 unsigned int block_width,
                                 unsigned int block_height,
                                 int strength,
                                 int filter_weight,
                                 unsigned int *accumulator,
                                 uint16_t *count) {
  unsigned int row, col, idx;
  int offset = 0;  // index into frame1, advanced using its stride
  // Half-ulp rounding term applied before the strength shift.
  const int rounding = (strength > 0) ? (1 << (strength - 1)) : 0;

  for (row = 0, idx = 0; row < block_height; ++row) {
    for (col = 0; col < block_width; ++col, ++idx) {
      const int ref_pixel = frame1[offset];
      const int pixel_value = *frame2++;
      const int diff = ref_pixel - pixel_value;

      // Integer approximation of:
      //   float coeff = (3.0 * diff * diff) / pow(2, strength);
      //   weight = (int)roundf(coeff > 16 ? 0 : 16 - coeff);
      int weight = (diff * diff * 3 + rounding) >> strength;
      if (weight > 16)
        weight = 16;
      weight = (16 - weight) * filter_weight;

      count[idx] += weight;
      accumulator[idx] += weight * pixel_value;

      ++offset;
    }

    // Skip frame1's row padding to land on the next row.
    offset += stride - block_width;
  }
}

136
// Motion search for the 16x16 block at |frame_ptr_buf| that best matches the
// ARF block at |arf_frame_buf| (both with the same |stride|). The winning mv
// is written into mi[0]->bmi[0].as_mv[0]; the sub-pel error is returned.
static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
                                              uint8_t *arf_frame_buf,
                                              uint8_t *frame_ptr_buf,
                                              int stride) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv;
  int step_param;
  int sad_per_bit = x->sadperbit16;
  int best_err = INT_MAX;
  int distortion;
  unsigned int sse;
  MV center_mv = {0, 0};  // sub-pel anchor for the search
  MV center_mv_full;      // full-pel value of center_mv
  MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv;

  // Save input state so it can be restored after the search.
  struct buf_2d saved_src = x->plane[0].src;
  struct buf_2d saved_pre = xd->plane[0].pre[0];

  center_mv_full.col = center_mv.col >> 3;
  center_mv_full.row = center_mv.row >> 3;

  // Point the source plane at the ARF block and the reference plane at the
  // candidate frame's block.
  x->plane[0].src.buf = arf_frame_buf;
  x->plane[0].src.stride = stride;
  xd->plane[0].pre[0].buf = frame_ptr_buf;
  xd->plane[0].pre[0].stride = stride;

  step_param = mv_sf->reduce_first_step_size;
  step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2);

  // Full-pel hex search around the zero mv.
  vp9_hex_search(x, &center_mv_full, step_param, sad_per_bit, 1,
                 &cpi->fn_ptr[BLOCK_16X16], 0, &center_mv, ref_mv);

  // Sub-pel refinement; NULL cost arrays ignore mv cost entirely.
  best_err = cpi->find_fractional_mv_step(x, ref_mv,
                                          &center_mv,
                                          cpi->common.allow_high_precision_mv,
                                          x->errorperbit,
                                          &cpi->fn_ptr[BLOCK_16X16],
                                          0, mv_sf->subpel_iters_per_step,
                                          NULL, NULL,
                                          &distortion, &sse, NULL, 0, 0);

  // Restore input state.
  x->plane[0].src = saved_src;
  xd->plane[0].pre[0] = saved_pre;

  return best_err;
}

190
191
192
// Core ARNR loop: for every macroblock of the alt-ref frame, accumulate
// weighted, motion-compensated contributions from each frame in
// cpi->frames[0..frame_count), then normalize into cpi->alt_ref_buffer.
static void temporal_filter_iterate_c(VP9_COMP *cpi,
                                      int frame_count,
                                      int alt_ref_index,
                                      int strength,
                                      struct scale_factors *scale) {
  int pos;
  int frame;
  int mb_col, mb_row;
  unsigned int filter_weight;
  int mb_cols = cpi->common.mb_cols;
  int mb_rows = cpi->common.mb_rows;
  int mb_y_offset = 0;
  int mb_uv_offset = 0;
  DECLARE_ALIGNED_ARRAY(16, unsigned int, accumulator, 16 * 16 * 3);
  DECLARE_ALIGNED_ARRAY(16, uint16_t, count, 16 * 16 * 3);
  MACROBLOCKD *mbd = &cpi->mb.e_mbd;
  YV12_BUFFER_CONFIG *ref_buf = cpi->frames[alt_ref_index];
  uint8_t *dst, *dst_v;
  DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 * 3);
  const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y;
  const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x;

  // Save input state: pre[0].buf is repointed during the search and must be
  // restored before returning.
  uint8_t *input_buffer[MAX_MB_PLANE];
  int plane;

  for (plane = 0; plane < MAX_MB_PLANE; plane++)
    input_buffer[plane] = mbd->plane[plane].pre[0].buf;

  for (mb_row = 0; mb_row < mb_rows; mb_row++) {
    // Source frames are extended to 16 pixels, unlike the L/A/G reference
    // frames which carry a 32-pixel border (VP9ENCBORDERINPIXELS).
    // A 6/8-tap filter is used for motion search, needing 2 pixels before
    // and 3 after, so the largest Y border mv is 16 - VP9_INTERP_EXTEND.
    // UV blocks are half size and extended by only 8, so a UV mv can reach
    // 8 - VP9_INTERP_EXTEND; since a UV mv is half a Y mv,
    // (16 - VP9_INTERP_EXTEND) >> 1 exceeds that. To keep the mv valid for
    // both planes the border limit is 16 - (2 * VP9_INTERP_EXTEND + 1).
    cpi->mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND));
    cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16)
                         + (17 - 2 * VP9_INTERP_EXTEND);

    for (mb_col = 0; mb_col < mb_cols; mb_col++) {
      int r, c, idx;
      int stride;

      vpx_memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0]));
      vpx_memset(count, 0, 16 * 16 * 3 * sizeof(count[0]));

      cpi->mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND));
      cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16)
                           + (17 - 2 * VP9_INTERP_EXTEND);

      for (frame = 0; frame < frame_count; frame++) {
        const int thresh_low = 10000;
        const int thresh_high = 20000;

        if (cpi->frames[frame] == NULL)
          continue;

        mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0;
        mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0;

        if (frame == alt_ref_index) {
          // The alt-ref frame itself always gets the maximum weight.
          filter_weight = 2;
        } else {
          // Motion-compensate against this frame; weight the contribution
          // by how well the matched block agrees with the ARF block. With
          // no usable match the block is excluded (weight 0).
          int err = temporal_filter_find_matching_mb_c(cpi,
              cpi->frames[alt_ref_index]->y_buffer + mb_y_offset,
              cpi->frames[frame]->y_buffer + mb_y_offset,
              cpi->frames[frame]->y_stride);

          if (err < thresh_low)
            filter_weight = 2;
          else if (err < thresh_high)
            filter_weight = 1;
          else
            filter_weight = 0;
        }

        if (filter_weight != 0) {
          // Construct the Y/U/V predictors for the matched position.
          temporal_filter_predictors_mb_c(mbd,
              cpi->frames[frame]->y_buffer + mb_y_offset,
              cpi->frames[frame]->u_buffer + mb_uv_offset,
              cpi->frames[frame]->v_buffer + mb_uv_offset,
              cpi->frames[frame]->y_stride,
              mb_uv_width, mb_uv_height,
              mbd->mi[0]->bmi[0].as_mv[0].as_mv.row,
              mbd->mi[0]->bmi[0].as_mv[0].as_mv.col,
              predictor, scale,
              mb_col * 16, mb_row * 16);

          // Accumulate the weighted predictor into all three planes.
          vp9_temporal_filter_apply(ref_buf->y_buffer + mb_y_offset,
                                    ref_buf->y_stride,
                                    predictor, 16, 16,
                                    strength, filter_weight,
                                    accumulator, count);
          vp9_temporal_filter_apply(ref_buf->u_buffer + mb_uv_offset,
                                    ref_buf->uv_stride,
                                    predictor + 256,
                                    mb_uv_width, mb_uv_height, strength,
                                    filter_weight, accumulator + 256,
                                    count + 256);
          vp9_temporal_filter_apply(ref_buf->v_buffer + mb_uv_offset,
                                    ref_buf->uv_stride,
                                    predictor + 512,
                                    mb_uv_width, mb_uv_height, strength,
                                    filter_weight, accumulator + 512,
                                    count + 512);
        }
      }

      // Normalize the Y accumulator into the AltRef buffer using the Q19
      // reciprocal table (accumulator / count with round-to-nearest).
      dst = cpi->alt_ref_buffer.y_buffer;
      stride = cpi->alt_ref_buffer.y_stride;
      pos = mb_y_offset;
      for (r = 0, idx = 0; r < 16; r++) {
        for (c = 0; c < 16; c++, idx++) {
          unsigned int val = accumulator[idx] + (count[idx] >> 1);
          val *= fixed_divide[count[idx]];
          val >>= 19;

          dst[pos] = (uint8_t)val;

          pos++;
        }
        pos += stride - 16;
      }

      // Normalize U and V the same way; they share the uv stride.
      dst = cpi->alt_ref_buffer.u_buffer;
      dst_v = cpi->alt_ref_buffer.v_buffer;
      stride = cpi->alt_ref_buffer.uv_stride;
      pos = mb_uv_offset;
      for (r = 0, idx = 256; r < mb_uv_height; r++) {
        for (c = 0; c < mb_uv_width; c++, idx++) {
          const int idx_v = idx + 256;

          // U plane.
          unsigned int val = accumulator[idx] + (count[idx] >> 1);
          val *= fixed_divide[count[idx]];
          val >>= 19;
          dst[pos] = (uint8_t)val;

          // V plane.
          val = accumulator[idx_v] + (count[idx_v] >> 1);
          val *= fixed_divide[count[idx_v]];
          val >>= 19;
          dst_v[pos] = (uint8_t)val;

          pos++;
        }
        pos += stride - mb_uv_width;
      }
      mb_y_offset += 16;
      mb_uv_offset += mb_uv_width;
    }
    mb_y_offset += 16 * (ref_buf->y_stride - mb_cols);
    mb_uv_offset += mb_uv_height * ref_buf->uv_stride - mb_uv_width * mb_cols;
  }

  // Restore input state.
  for (plane = 0; plane < MAX_MB_PLANE; plane++)
    mbd->plane[plane].pre[0].buf = input_buffer[plane];
}

358
359
360
361
362
363
364
365
// Apply buffer limits and context specific adjustments to arnr filter:
// derives cpi->active_arnr_frames / cpi->active_arnr_strength from the
// configured maximums, the lookahead depth, the active quantizer and the
// GF group boost.
static void adjust_arnr_filter(VP9_COMP *cpi,
                               int distance, int group_boost) {
  const int frames_after_arf =
      vp9_lookahead_depth(cpi->lookahead) - distance - 1;
  int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
  int frames_bwd;
  int q;

  // Forward frames are limited by both what is left in the lookahead and
  // the distance back to the previous key/golden frame.
  frames_fwd = MIN(frames_fwd, frames_after_arf);
  frames_fwd = MIN(frames_fwd, distance);

  frames_bwd = frames_fwd;

  // For even length filter there is one more frame backward
  // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
  if (frames_bwd < distance)
    frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;

  // Baseline active filter size (backward + the ARF itself + forward).
  cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd;

  // Adjust the strength based on the active max q.
  if (cpi->common.current_video_frame > 1)
    q = (int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME]);
  else
    q = (int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME]);

  if (q > 16) {
    cpi->active_arnr_strength = cpi->oxcf.arnr_strength;
  } else {
    // At very low q, soften the filter; never below zero.
    cpi->active_arnr_strength = cpi->oxcf.arnr_strength - ((16 - q) / 2);
    if (cpi->active_arnr_strength < 0)
      cpi->active_arnr_strength = 0;
  }

  // Adjust number of frames in filter and strength based on gf boost level.
  if (cpi->active_arnr_frames > (group_boost / 150)) {
    cpi->active_arnr_frames = (group_boost / 150);
    cpi->active_arnr_frames += !(cpi->active_arnr_frames & 1);
  }
  if (cpi->active_arnr_strength > (group_boost / 300)) {
    cpi->active_arnr_strength = (group_boost / 300);
  }

  // Adjustments for second level arf in multi arf case.
  if (cpi->pass == 2 && cpi->multi_arf_allowed) {
    const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
    if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) {
      cpi->active_arnr_strength >>= 1;
    }
  }
}

// Entry point for ARNR temporal filtering: pick the frame window around the
// alt-ref frame at |distance|, set up scale factors (with non-normative
// rescaling for spatial SVC), and run the filter iteration.
void vp9_temporal_filter(VP9_COMP *cpi, int distance) {
  VP9_COMMON *const cm = &cpi->common;
  RATE_CONTROL *const rc = &cpi->rc;
  int frame;
  int frames_to_blur;
  int start_frame;
  int strength;
  int frames_to_blur_backward;
  int frames_to_blur_forward;
  struct scale_factors sf;

  // Apply context specific adjustments to the arnr filter parameters.
  adjust_arnr_filter(cpi, distance, rc->gfu_boost);
  strength = cpi->active_arnr_strength;
  frames_to_blur = cpi->active_arnr_frames;
  frames_to_blur_backward = frames_to_blur / 2;
  frames_to_blur_forward = (frames_to_blur - 1) / 2;
  start_frame = distance + frames_to_blur_forward;

  // Setup frame pointers; NULL indicates a frame not included in the filter.
  vp9_zero(cpi->frames);
  for (frame = 0; frame < frames_to_blur; ++frame) {
    struct lookahead_entry *entry =
        vp9_lookahead_peek(cpi->lookahead, start_frame - frame);
    cpi->frames[frames_to_blur - 1 - frame] = &entry->img;
  }

  // Setup scaling factors. Scaling on each of the arnr frames is not
  // supported.
  if (cpi->use_svc && cpi->svc.number_temporal_layers == 1) {
    // In spatial svc the scaling factors might be less than 1/2, so use
    // non-normative scaling: rescale any mismatched frame into a scratch
    // buffer and filter at the current coded size.
    int frame_used = 0;
    vp9_setup_scale_factors_for_frame(&sf,
                                      get_frame_new_buffer(cm)->y_crop_width,
                                      get_frame_new_buffer(cm)->y_crop_height,
                                      get_frame_new_buffer(cm)->y_crop_width,
                                      get_frame_new_buffer(cm)->y_crop_height);

    for (frame = 0; frame < frames_to_blur; ++frame) {
      if (cm->mi_cols * MI_SIZE != cpi->frames[frame]->y_width ||
          cm->mi_rows * MI_SIZE != cpi->frames[frame]->y_height) {
        if (vp9_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used],
                                     cm->width, cm->height,
                                     cm->subsampling_x, cm->subsampling_y,
                                     VP9_ENC_BORDER_IN_PIXELS, NULL, NULL,
                                     NULL))
          vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                             "Failed to reallocate alt_ref_buffer");

        cpi->frames[frame] =
            vp9_scale_if_required(cm, cpi->frames[frame],
                                  &cpi->svc.scaled_frames[frame_used]);
        ++frame_used;
      }
    }
  } else {
    vp9_setup_scale_factors_for_frame(&sf,
                                      get_frame_new_buffer(cm)->y_crop_width,
                                      get_frame_new_buffer(cm)->y_crop_height,
                                      cm->width, cm->height);
  }

  temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
                            strength, &sf);
}