/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "av1/encoder/global_motion.h"

#include "av1/common/warped_motion.h"

#include "av1/encoder/segmentation.h"
#include "av1/encoder/corner_detect.h"
#include "av1/encoder/corner_match.h"
#include "av1/encoder/ransac.h"

#define MAX_CORNERS 4096
#define MIN_INLIER_PROB 0.1

#define MIN_TRANS_THRESH (1 * GM_TRANS_DECODE_FACTOR)

// Border over which to compute the global motion
#define ERRORADV_BORDER 0

const double gm_advantage_thresh[TRANS_TYPES] = {
  1.00,  // Identity (not used)
  0.85,  // Translation
  0.75,  // Rot zoom
  0.65,  // Affine
  0.65,  // Hor Trapezoid
  0.65,  // Ver Trapezoid
  0.50,  // Homography
};
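
// A sketch of how these thresholds are presumably applied (an assumption
// drawn from the name "advantage threshold"; the actual comparison lives in
// the caller, not in this file): a global model of a given type is only kept
// if the warped prediction error is sufficiently smaller than the unwarped
// error, roughly
//
//   use_global_motion = (warp_error < gm_advantage_thresh[wmtype] * ref_error);
//
// so the more expensive-to-signal types (e.g. HOMOGRAPHY) must earn a larger
// error reduction than TRANSLATION before they are selected.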

void convert_to_params(const double *params, int32_t *model) {
  int i;
  int alpha_present = 0;
  model[0] = (int32_t)floor(params[0] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[1] = (int32_t)floor(params[1] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[0] = (int32_t)clamp(model[0], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;
  model[1] = (int32_t)clamp(model[1], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;

  for (i = 2; i < 6; ++i) {
    const int diag_value = ((i == 2 || i == 5) ? (1 << GM_ALPHA_PREC_BITS) : 0);
    model[i] = (int32_t)floor(params[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
    model[i] =
        (int32_t)clamp(model[i] - diag_value, GM_ALPHA_MIN, GM_ALPHA_MAX);
    alpha_present |= (model[i] != 0);
    model[i] = (model[i] + diag_value) * GM_ALPHA_DECODE_FACTOR;
  }
  for (; i < 8; ++i) {
    model[i] = (int32_t)floor(params[i] * (1 << GM_ROW3HOMO_PREC_BITS) + 0.5);
    model[i] = (int32_t)clamp(model[i], GM_ROW3HOMO_MIN, GM_ROW3HOMO_MAX) *
               GM_ROW3HOMO_DECODE_FACTOR;
    alpha_present |= (model[i] != 0);
  }

  if (!alpha_present) {
    if (abs(model[0]) < MIN_TRANS_THRESH && abs(model[1]) < MIN_TRANS_THRESH) {
      model[0] = 0;
      model[1] = 0;
    }
  }
}

void convert_model_to_params(const double *params, WarpedMotionParams *model) {
  convert_to_params(params, model->wmmat);
  model->wmtype = get_gmtype(model);
}
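
// Illustrative example of the conversion above (hypothetical input value; the
// precision constants themselves are defined in the warped-motion headers): a
// raw translation parameter params[0] = 0.5 is rounded to
//   round(0.5 * (1 << GM_TRANS_PREC_BITS)),
// clamped to [GM_TRANS_MIN, GM_TRANS_MAX], and then rescaled by
// GM_TRANS_DECODE_FACTOR so the stored value matches the precision used by
// the decoder. The diagonal terms wmmat[2] and wmmat[5] are coded relative to
// 1.0 (the identity), which is why diag_value is subtracted before clamping
// and added back afterwards.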

// Adds some offset to a global motion parameter and handles
// all of the necessary precision shifts, clamping, and
// zero-centering.
int32_t add_param_offset(int param_index, int32_t param_value, int32_t offset) {
  const int scale_vals[3] = { GM_TRANS_PREC_DIFF, GM_ALPHA_PREC_DIFF,
                              GM_ROW3HOMO_PREC_DIFF };
  const int clamp_vals[3] = { GM_TRANS_MAX, GM_ALPHA_MAX, GM_ROW3HOMO_MAX };
  // type of param: 0 - translation, 1 - affine, 2 - homography
  const int param_type = (param_index < 2 ? 0 : (param_index < 6 ? 1 : 2));
  const int is_one_centered = (param_index == 2 || param_index == 5);

  // Make parameter zero-centered and offset the shift that was done to make
  // it compatible with the warped model
  param_value = (param_value - (is_one_centered << WARPEDMODEL_PREC_BITS)) >>
                scale_vals[param_type];
  // Add desired offset to the rescaled/zero-centered parameter
  param_value += offset;
  // Clamp the parameter so it does not overflow the number of bits allotted
  // to it in the bitstream
  param_value = (int32_t)clamp(param_value, -clamp_vals[param_type],
                               clamp_vals[param_type]);
  // Rescale the parameter to WARPEDMODEL_PRECISION_BITS so it is compatible
  // with the warped motion library
  param_value *= (1 << scale_vals[param_type]);

  // Undo the zero-centering step if necessary
  return param_value + (is_one_centered << WARPEDMODEL_PREC_BITS);
}
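
// Worked through symbolically for param_index == 2 (a diagonal, 1-centered
// term; hypothetical example, not called from this spot): the incoming value
// is first re-centered around zero by subtracting (1 << WARPEDMODEL_PREC_BITS)
// and shifted down by GM_ALPHA_PREC_DIFF, so that `offset` is expressed in
// units of the bitstream's coded precision. After adding the offset and
// clamping to +/- GM_ALPHA_MAX, the value is shifted back up and re-centered,
// yielding a parameter directly usable by the warped-motion code.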

void force_wmtype(WarpedMotionParams *wm, TransformationType wmtype) {
  // Note: the IDENTITY, TRANSLATION and ROTZOOM cases intentionally fall
  // through, so that forcing a simpler type also fixes every parameter used
  // only by the more general types.
  switch (wmtype) {
    case IDENTITY: wm->wmmat[0] = 0; wm->wmmat[1] = 0;
    // fall through
    case TRANSLATION:
      wm->wmmat[2] = 1 << WARPEDMODEL_PREC_BITS;
      wm->wmmat[3] = 0;
    // fall through
    case ROTZOOM: wm->wmmat[4] = -wm->wmmat[3]; wm->wmmat[5] = wm->wmmat[2];
    // fall through
    case AFFINE: wm->wmmat[6] = wm->wmmat[7] = 0; break;
    case HORTRAPEZOID: wm->wmmat[6] = wm->wmmat[4] = 0; break;
    case VERTRAPEZOID: wm->wmmat[7] = wm->wmmat[3] = 0; break;
    case HOMOGRAPHY: break;
    default: assert(0);
  }
  wm->wmtype = wmtype;
}

double refine_integerized_param(WarpedMotionParams *wm,
                                TransformationType wmtype,
#if CONFIG_AOM_HIGHBITDEPTH
                                int use_hbd, int bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
                                uint8_t *ref, int r_width, int r_height,
                                int r_stride, uint8_t *dst, int d_width,
                                int d_height, int d_stride, int n_refinements) {
  static const int max_trans_model_params[TRANS_TYPES] = {
    0, 2, 4, 6, 8, 8, 8
  };
  const int border = ERRORADV_BORDER;
  int i = 0, p;
  int n_params = max_trans_model_params[wmtype];
  int32_t *param_mat = wm->wmmat;
  double step_error;
  int32_t step;
  int32_t *param;
  int32_t curr_param;
  int32_t best_param;
  double best_error;

  force_wmtype(wm, wmtype);
  best_error = av1_warp_erroradv(wm,
#if CONFIG_AOM_HIGHBITDEPTH
                                 use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
                                 ref, r_width, r_height, r_stride,
                                 dst + border * d_stride + border, border,
                                 border, d_width - 2 * border,
                                 d_height - 2 * border, d_stride, 0, 0, 16, 16);
  step = 1 << (n_refinements + 1);
  for (i = 0; i < n_refinements; i++, step >>= 1) {
    for (p = 0; p < n_params; ++p) {
      int step_dir = 0;
      // Skip searches for parameters that are forced to be 0
      if (wmtype == HORTRAPEZOID && (p == 4 || p == 6)) continue;
      if (wmtype == VERTRAPEZOID && (p == 3 || p == 7)) continue;
      param = param_mat + p;
      curr_param = *param;
      best_param = curr_param;
      // look to the left
      *param = add_param_offset(p, curr_param, -step);
      step_error = av1_warp_erroradv(
          wm,
#if CONFIG_AOM_HIGHBITDEPTH
          use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
          ref, r_width, r_height, r_stride, dst + border * d_stride + border,
          border, border, d_width - 2 * border, d_height - 2 * border, d_stride,
          0, 0, 16, 16);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = -1;
      }

      // look to the right
      *param = add_param_offset(p, curr_param, step);
      step_error = av1_warp_erroradv(
          wm,
#if CONFIG_AOM_HIGHBITDEPTH
          use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
          ref, r_width, r_height, r_stride, dst + border * d_stride + border,
          border, border, d_width - 2 * border, d_height - 2 * border, d_stride,
          0, 0, 16, 16);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = 1;
      }
      *param = best_param;

      // look to the direction chosen above repeatedly until error increases
      // for the biggest step size
      while (step_dir) {
        *param = add_param_offset(p, best_param, step * step_dir);
        step_error = av1_warp_erroradv(
            wm,
#if CONFIG_AOM_HIGHBITDEPTH
            use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
            ref, r_width, r_height, r_stride, dst + border * d_stride + border,
            border, border, d_width - 2 * border, d_height - 2 * border,
            d_stride, 0, 0, 16, 16);
        if (step_error < best_error) {
          best_error = step_error;
          best_param = *param;
        } else {
          *param = best_param;
          step_dir = 0;
        }
      }
    }
  }
  force_wmtype(wm, wmtype);
  wm->wmtype = get_gmtype(wm);
  return best_error;
}
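
// Example of the refinement schedule above (hypothetical n_refinements = 3):
// the search starts at step = 1 << (3 + 1) = 16 and halves on each pass, so
// every integerized parameter is probed at offsets of +/-16, then +/-8, then
// +/-4 (continuing in the winning direction while the error keeps dropping),
// with av1_warp_erroradv() re-evaluated on the ERRORADV_BORDER-inset region
// after every trial step.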

static INLINE RansacFunc get_ransac_type(TransformationType type) {
  switch (type) {
    case HOMOGRAPHY: return ransac_homography;
    case HORTRAPEZOID: return ransac_hortrapezoid;
    case VERTRAPEZOID: return ransac_vertrapezoid;
    case AFFINE: return ransac_affine;
    case ROTZOOM: return ransac_rotzoom;
    case TRANSLATION: return ransac_translation;
    default: assert(0); return NULL;
  }
}

// computes global motion parameters by fitting a model using RANSAC
static int compute_global_motion_params(TransformationType type,
                                        int *correspondences,
                                        int num_correspondences,
                                        double *params) {
  int result;
  int num_inliers = 0;
  RansacFunc ransac = get_ransac_type(type);
  if (ransac == NULL) return 0;

  result = ransac(correspondences, num_correspondences, &num_inliers, params);
  if (!result && num_inliers < MIN_INLIER_PROB * num_correspondences) {
    result = 1;
    num_inliers = 0;
  }
  return num_inliers;
}
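
// Example of the inlier check above (hypothetical correspondence count): with
// 200 candidate correspondences, MIN_INLIER_PROB (0.1) requires at least 20
// RANSAC inliers; a fit supported by fewer inliers is discarded, the function
// returns 0, and no global model is signalled for that transform type.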

#if CONFIG_AOM_HIGHBITDEPTH
unsigned char *downconvert_frame(YV12_BUFFER_CONFIG *frm, int bit_depth) {
  int i, j;
  uint16_t *orig_buf = CONVERT_TO_SHORTPTR(frm->y_buffer);
  uint8_t *buf = malloc(frm->y_height * frm->y_stride * sizeof(*buf));

  for (i = 0; i < frm->y_height; ++i)
    for (j = 0; j < frm->y_width; ++j)
      buf[i * frm->y_stride + j] =
          orig_buf[i * frm->y_stride + j] >> (bit_depth - 8);

  return buf;
}
#endif
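
// Example of the downconversion above (hypothetical 10-bit input): with
// bit_depth == 10 each 16-bit luma sample is shifted right by 2, so a sample
// value of 700 becomes 175, i.e. the frame is reduced to the 8-bit range
// expected by the corner detection and matching code below.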

int compute_global_motion_feature_based(TransformationType type,
                                        YV12_BUFFER_CONFIG *frm,
                                        YV12_BUFFER_CONFIG *ref,
#if CONFIG_AOM_HIGHBITDEPTH
                                        int bit_depth,
#endif
                                        double *params) {
  int num_frm_corners, num_ref_corners;
  int num_correspondences;
  int *correspondences;
  int num_inliers;
  int frm_corners[2 * MAX_CORNERS], ref_corners[2 * MAX_CORNERS];
  unsigned char *frm_buffer = frm->y_buffer;
  unsigned char *ref_buffer = ref->y_buffer;

#if CONFIG_AOM_HIGHBITDEPTH
  if (frm->flags & YV12_FLAG_HIGHBITDEPTH) {
    // The frame buffer is 16-bit, so we need to convert to 8 bits for the
    // following code. We cache the result until the frame is released.
    if (frm->y_buffer_8bit)
      frm_buffer = frm->y_buffer_8bit;
    else
      frm_buffer = frm->y_buffer_8bit = downconvert_frame(frm, bit_depth);
  }
  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    if (ref->y_buffer_8bit)
      ref_buffer = ref->y_buffer_8bit;
    else
      ref_buffer = ref->y_buffer_8bit = downconvert_frame(ref, bit_depth);
  }
#endif

  // compute interest points in images using FAST features
  num_frm_corners = fast_corner_detect(frm_buffer, frm->y_width, frm->y_height,
                                       frm->y_stride, frm_corners, MAX_CORNERS);
  num_ref_corners = fast_corner_detect(ref_buffer, ref->y_width, ref->y_height,
                                       ref->y_stride, ref_corners, MAX_CORNERS);

  // find correspondences between the two images
  correspondences =
      (int *)malloc(num_frm_corners * 4 * sizeof(*correspondences));
  num_correspondences = determine_correspondence(
      frm_buffer, (int *)frm_corners, num_frm_corners, ref_buffer,
      (int *)ref_corners, num_ref_corners, frm->y_width, frm->y_height,
      frm->y_stride, ref->y_stride, correspondences);

  num_inliers = compute_global_motion_params(type, correspondences,
                                             num_correspondences, params);
  free(correspondences);
  return (num_inliers > 0);
}
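
// A minimal usage sketch of the full pipeline in this file, assuming a build
// without CONFIG_AOM_HIGHBITDEPTH (so the bit-depth arguments drop out). The
// caller, the variable names, and the choice of 3 refinement steps are
// illustrative assumptions, not code from this library; frm and ref are
// YV12_BUFFER_CONFIG pointers supplied by the encoder:
//
//   double params[8];
//   WarpedMotionParams wm;
//   if (compute_global_motion_feature_based(AFFINE, frm, ref, params)) {
//     convert_model_to_params(params, &wm);     // double -> integerized model
//     refine_integerized_param(&wm, wm.wmtype,  // local search on each param
//                              ref->y_buffer, ref->y_width, ref->y_height,
//                              ref->y_stride, frm->y_buffer, frm->y_width,
//                              frm->y_height, frm->y_stride, 3);
//   }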