/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "av1/common/warped_motion.h"

#include "av1/encoder/segmentation.h"
#include "av1/encoder/corner_detect.h"
#include "av1/encoder/corner_match.h"
#include "av1/encoder/ransac.h"

// Maximum number of corners detected per frame
#define MAX_CORNERS 4096
// Minimum fraction of correspondences that must be inliers for a fitted
// model to be accepted
#define MIN_INLIER_PROB 0.1

// Translations smaller than this (in decoded-parameter units) are zeroed out
// when every non-translation parameter is at its identity value
#define MIN_TRANS_THRESH (1 * GM_TRANS_DECODE_FACTOR)

// Border over which to compute the global motion
#define ERRORADV_BORDER 0

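// Convert a double-precision motion model to the integer (fixed-point)
// representation used by the warped-motion code. Translation terms are
// rounded to GM_TRANS_PREC_BITS, the 2x2 affine block to GM_ALPHA_PREC_BITS
// and the row-3 homography terms to GM_ROW3HOMO_PREC_BITS, with each value
// clamped to the range representable in the bitstream. If every
// non-translation parameter rounds to its identity value and the translation
// is below MIN_TRANS_THRESH, the model is snapped to the identity.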
void convert_to_params(const double *params, int32_t *model) {
  int i;
  int alpha_present = 0;
  model[0] = (int32_t)floor(params[0] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[1] = (int32_t)floor(params[1] * (1 << GM_TRANS_PREC_BITS) + 0.5);
  model[0] = (int32_t)clamp(model[0], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;
  model[1] = (int32_t)clamp(model[1], GM_TRANS_MIN, GM_TRANS_MAX) *
             GM_TRANS_DECODE_FACTOR;

  for (i = 2; i < 6; ++i) {
    const int diag_value = ((i == 2 || i == 5) ? (1 << GM_ALPHA_PREC_BITS) : 0);
    model[i] = (int32_t)floor(params[i] * (1 << GM_ALPHA_PREC_BITS) + 0.5);
    model[i] =
        (int32_t)clamp(model[i] - diag_value, GM_ALPHA_MIN, GM_ALPHA_MAX);
    alpha_present |= (model[i] != 0);
    model[i] = (model[i] + diag_value) * GM_ALPHA_DECODE_FACTOR;
  }
  for (; i < 8; ++i) {
    model[i] = (int32_t)floor(params[i] * (1 << GM_ROW3HOMO_PREC_BITS) + 0.5);
    model[i] = (int32_t)clamp(model[i], GM_ROW3HOMO_MIN, GM_ROW3HOMO_MAX) *
               GM_ROW3HOMO_DECODE_FACTOR;
    alpha_present |= (model[i] != 0);
  }

  if (!alpha_present) {
    if (abs(model[0]) < MIN_TRANS_THRESH && abs(model[1]) < MIN_TRANS_THRESH) {
      model[0] = 0;
      model[1] = 0;
    }
  }
}

void convert_model_to_params(const double *params, WarpedMotionParams *model) {
  convert_to_params(params, model->wmmat);
  model->wmtype = get_gmtype(model);
}

// Adds some offset to a global motion parameter and handles
// all of the necessary precision shifts, clamping, and
// zero-centering.
int32_t add_param_offset(int param_index, int32_t param_value, int32_t offset) {
  const int scale_vals[3] = { GM_TRANS_PREC_DIFF, GM_ALPHA_PREC_DIFF,
                              GM_ROW3HOMO_PREC_DIFF };
  const int clamp_vals[3] = { GM_TRANS_MAX, GM_ALPHA_MAX, GM_ROW3HOMO_MAX };
  // type of param: 0 - translation, 1 - affine, 2 - homography
  const int param_type = (param_index < 2 ? 0 : (param_index < 6 ? 1 : 2));
  const int is_one_centered = (param_index == 2 || param_index == 5);

  // Make parameter zero-centered and offset the shift that was done to make
  // it compatible with the warped model
  param_value = (param_value - (is_one_centered << WARPEDMODEL_PREC_BITS)) >>
                scale_vals[param_type];
  // Add desired offset to the rescaled/zero-centered parameter
  param_value += offset;
  // Clamp the parameter so it does not overflow the number of bits allotted
  // to it in the bitstream
  param_value = (int32_t)clamp(param_value, -clamp_vals[param_type],
                               clamp_vals[param_type]);
  // Rescale the parameter to WARPEDMODEL_PRECISION_BITS so it is compatible
  // with the warped motion library
  param_value *= (1 << scale_vals[param_type]);

  // Undo the zero-centering step if necessary
  return param_value + (is_one_centered << WARPEDMODEL_PREC_BITS);
}

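// Force the warped-motion parameters to be consistent with the given
// transformation type by resetting every parameter the type does not use to
// its identity value.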
void force_wmtype(WarpedMotionParams *wm, TransformationType wmtype) {
  switch (wmtype) {
    case IDENTITY: wm->wmmat[0] = 0; wm->wmmat[1] = 0;  // fall through
    case TRANSLATION:
      wm->wmmat[2] = 1 << WARPEDMODEL_PREC_BITS;
      wm->wmmat[3] = 0;
      // fall through
    case ROTZOOM:
      wm->wmmat[4] = -wm->wmmat[3];
      wm->wmmat[5] = wm->wmmat[2];
      // fall through
    case AFFINE: wm->wmmat[6] = wm->wmmat[7] = 0; break;
    case HORTRAPEZOID: wm->wmmat[6] = wm->wmmat[4] = 0; break;
    case VERTRAPEZOID: wm->wmmat[7] = wm->wmmat[3] = 0; break;
    case HOMOGRAPHY: break;
    default: assert(0);
  }
  wm->wmtype = wmtype;
}

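// Greedily refine the integerized parameters of the warped model, one
// parameter at a time (coordinate descent). For each parameter, try an
// offset of +/- step, keep walking in whichever direction reduces the warp
// error reported by av1_warp_erroradv, and halve the step size after every
// refinement pass. Returns the lowest error measure found.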
double refine_integerized_param(WarpedMotionParams *wm,
                                TransformationType wmtype,
#if CONFIG_AOM_HIGHBITDEPTH
                                int use_hbd, int bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
                                uint8_t *ref, int r_width, int r_height,
                                int r_stride, uint8_t *dst, int d_width,
                                int d_height, int d_stride, int n_refinements) {
  static const int max_trans_model_params[TRANS_TYPES] = {
    0, 2, 4, 6, 8, 8, 8
  };
  const int border = ERRORADV_BORDER;
  int i = 0, p;
  int n_params = max_trans_model_params[wmtype];
  int32_t *param_mat = wm->wmmat;
  double step_error;
  int32_t step;
  int32_t *param;
  int32_t curr_param;
  int32_t best_param;
  double best_error;

  force_wmtype(wm, wmtype);
  best_error = av1_warp_erroradv(wm,
#if CONFIG_AOM_HIGHBITDEPTH
                                 use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
                                 ref, r_width, r_height, r_stride,
                                 dst + border * d_stride + border, border,
                                 border, d_width - 2 * border,
                                 d_height - 2 * border, d_stride, 0, 0, 16, 16);
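  // Start from a coarse step size and halve it after each refinement pass.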
  step = 1 << (n_refinements + 1);
  for (i = 0; i < n_refinements; i++, step >>= 1) {
    for (p = 0; p < n_params; ++p) {
      int step_dir = 0;
      // Skip searches for parameters that are forced to be 0
      if (wmtype == HORTRAPEZOID && (p == 4 || p == 6)) continue;
      if (wmtype == VERTRAPEZOID && (p == 3 || p == 7)) continue;
      param = param_mat + p;
      curr_param = *param;
      best_param = curr_param;
      // look to the left
      *param = add_param_offset(p, curr_param, -step);
      step_error = av1_warp_erroradv(
          wm,
#if CONFIG_AOM_HIGHBITDEPTH
          use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
          ref, r_width, r_height, r_stride, dst + border * d_stride + border,
          border, border, d_width - 2 * border, d_height - 2 * border, d_stride,
          0, 0, 16, 16);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = -1;
      }

      // look to the right
      *param = add_param_offset(p, curr_param, step);
      step_error = av1_warp_erroradv(
          wm,
#if CONFIG_AOM_HIGHBITDEPTH
          use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
          ref, r_width, r_height, r_stride, dst + border * d_stride + border,
          border, border, d_width - 2 * border, d_height - 2 * border, d_stride,
          0, 0, 16, 16);
      if (step_error < best_error) {
        best_error = step_error;
        best_param = *param;
        step_dir = 1;
      }
      *param = best_param;

      // look to the direction chosen above repeatedly until error increases
      // for the biggest step size
      while (step_dir) {
        *param = add_param_offset(p, best_param, step * step_dir);
        step_error = av1_warp_erroradv(
            wm,
#if CONFIG_AOM_HIGHBITDEPTH
            use_hbd, bd,
#endif  // CONFIG_AOM_HIGHBITDEPTH
            ref, r_width, r_height, r_stride, dst + border * d_stride + border,
            border, border, d_width - 2 * border, d_height - 2 * border,
            d_stride, 0, 0, 16, 16);
        if (step_error < best_error) {
          best_error = step_error;
          best_param = *param;
        } else {
          *param = best_param;
          step_dir = 0;
        }
      }
    }
  }
  force_wmtype(wm, wmtype);
  wm->wmtype = get_gmtype(wm);
  return best_error;
}

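// Map a transformation type to the RANSAC routine that fits a model of that
// type to a set of point correspondences.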
static INLINE RansacFunc get_ransac_type(TransformationType type) {
  switch (type) {
    case HOMOGRAPHY: return ransac_homography;
    case HORTRAPEZOID: return ransac_hortrapezoid;
    case VERTRAPEZOID: return ransac_vertrapezoid;
    case AFFINE: return ransac_affine;
    case ROTZOOM: return ransac_rotzoom;
    case TRANSLATION: return ransac_translation;
    default: assert(0); return NULL;
  }
}

// Computes global-motion parameters by fitting a model of the given type to
// the point correspondences with RANSAC. Returns the number of inliers found,
// or 0 if no acceptable model was found.
static int compute_global_motion_params(TransformationType type,
                                        int *correspondences,
                                        int num_correspondences, double *params,
                                        int *inlier_map) {
  int result;
  int num_inliers = 0;
  RansacFunc ransac = get_ransac_type(type);
  if (ransac == NULL) return 0;

  result = ransac(correspondences, num_correspondences, &num_inliers,
                  inlier_map, params);
  if (!result && num_inliers < MIN_INLIER_PROB * num_correspondences) {
    result = 1;
    num_inliers = 0;
  }
  return num_inliers;
}

#if CONFIG_AOM_HIGHBITDEPTH
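// Make an 8-bit copy of the luma plane of a high-bitdepth frame so that the
// 8-bit corner detection and matching code below can be used. Each sample is
// right-shifted by (bit_depth - 8). The returned buffer is allocated with
// malloc() and is cached on the frame by the caller (see below).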
unsigned char *downconvert_frame(YV12_BUFFER_CONFIG *frm, int bit_depth) {
  int i, j;
  uint16_t *orig_buf = CONVERT_TO_SHORTPTR(frm->y_buffer);
  uint8_t *buf = malloc(frm->y_height * frm->y_stride * sizeof(*buf));

  for (i = 0; i < frm->y_height; ++i)
    for (j = 0; j < frm->y_width; ++j)
      buf[i * frm->y_stride + j] =
          orig_buf[i * frm->y_stride + j] >> (bit_depth - 8);

  return buf;
}
#endif

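// Compute global-motion parameters between frm and ref using a feature-based
// pipeline: detect FAST corners in both frames, match them to form point
// correspondences, then fit a model of the requested type with RANSAC.
// Returns 1 if a model with a non-zero number of inliers was found, 0
// otherwise; the fitted parameters are written to params.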
int compute_global_motion_feature_based(TransformationType type,
                                        YV12_BUFFER_CONFIG *frm,
                                        YV12_BUFFER_CONFIG *ref,
#if CONFIG_AOM_HIGHBITDEPTH
                                        int bit_depth,
#endif
                                        double *params) {
  int num_frm_corners, num_ref_corners;
  int num_correspondences;
  int *correspondences;
  int num_inliers;
  int frm_corners[2 * MAX_CORNERS], ref_corners[2 * MAX_CORNERS];
  int *inlier_map = NULL;
  unsigned char *frm_buffer = frm->y_buffer;
  unsigned char *ref_buffer = ref->y_buffer;

#if CONFIG_AOM_HIGHBITDEPTH
  if (frm->flags & YV12_FLAG_HIGHBITDEPTH) {
    // The frame buffer is 16-bit, so we need to convert to 8 bits for the
    // following code. We cache the result until the frame is released.
    if (frm->y_buffer_8bit)
      frm_buffer = frm->y_buffer_8bit;
    else
      frm_buffer = frm->y_buffer_8bit = downconvert_frame(frm, bit_depth);
  }
  if (ref->flags & YV12_FLAG_HIGHBITDEPTH) {
    if (ref->y_buffer_8bit)
      ref_buffer = ref->y_buffer_8bit;
    else
      ref_buffer = ref->y_buffer_8bit = downconvert_frame(ref, bit_depth);
  }
#endif

  // compute interest points in images using FAST features
  num_frm_corners = fast_corner_detect(frm_buffer, frm->y_width, frm->y_height,
                                       frm->y_stride, frm_corners, MAX_CORNERS);
  num_ref_corners = fast_corner_detect(ref_buffer, ref->y_width, ref->y_height,
                                       ref->y_stride, ref_corners, MAX_CORNERS);

  // find correspondences between the two images
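  // Each correspondence takes 4 ints (the matched point locations in frm and
  // ref), and there is at most one match per frame corner.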
  correspondences =
      (int *)malloc(num_frm_corners * 4 * sizeof(*correspondences));
  num_correspondences = determine_correspondence(
      frm_buffer, (int *)frm_corners, num_frm_corners, ref_buffer,
      (int *)ref_corners, num_ref_corners, frm->y_width, frm->y_height,
      frm->y_stride, ref->y_stride, correspondences);

  inlier_map = (int *)malloc(num_correspondences * sizeof(*inlier_map));
  num_inliers = compute_global_motion_params(
      type, correspondences, num_correspondences, params, inlier_map);
  free(correspondences);
  free(inlier_map);
  return (num_inliers > 0);
}