Commit 7ae7aeaf authored by Debargha Mukherjee

Redo / refactor affine and rot-zoom least squares

Use a simpler least-squares solver for affine and rot-zoom
model estimation instead of computing the pseudo-inverse.
Also refactor the shared math code into a separate mathutils.h file.

The SVD code is currently used only for estimating the
homography models, and can be removed once the homography
models are removed.

The coding-efficiency change is within the noise range; the
small difference comes from numerical precision.
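
For reference (illustration only; the exact parameterization in the
code may differ): with a rot-zoom map x' = a*x + b*y + tx,
y' = -b*x + a*y + ty, every point correspondence contributes two rows
to an over-determined system A p = b in p = (a, b, tx, ty). The new
path solves the normal equations (A'A) p = A'b with a small
Gaussian-elimination solver, instead of building a pseudo-inverse of A
from an SVD and multiplying it into b.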

Change-Id: I0a9eb79495911cea21a7945b397d596e22a2a186
parent 9cc10654
@@ -38,6 +38,7 @@ AV1_CX_SRCS-yes += encoder/ethread.h
AV1_CX_SRCS-yes += encoder/ethread.c
AV1_CX_SRCS-yes += encoder/extend.c
AV1_CX_SRCS-yes += encoder/firstpass.c
AV1_CX_SRCS-yes += encoder/mathutils.h
AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast.h
AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/nonmax.c
AV1_CX_SRCS-$(CONFIG_GLOBAL_MOTION) += ../third_party/fastfeat/fast_9.c
......
/*
* Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <memory.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "aom_dsp/aom_dsp_common.h"  // For AOMMIN().
#include "aom_mem/aom_mem.h"         // For aom_malloc() and aom_free().
static const double TINY_NEAR_ZERO = 1.0E-16;
// Solves Ax = b, where x and b are column vectors of size nx1 and A is nxn
static INLINE int linsolve(int n, double *A, int stride, double *b, double *x) {
int i, j, k;
double c;
// Forward elimination
for (k = 0; k < n - 1; k++) {
// Bring the row with the largest-magnitude pivot to the diagonal position
for (i = n - 1; i > k; i--) {
if (fabs(A[(i - 1) * stride + k]) < fabs(A[i * stride + k])) {
for (j = 0; j < n; j++) {
c = A[i * stride + j];
A[i * stride + j] = A[(i - 1) * stride + j];
A[(i - 1) * stride + j] = c;
}
c = b[i];
b[i] = b[i - 1];
b[i - 1] = c;
}
}
for (i = k; i < n - 1; i++) {
if (fabs(A[k * stride + k]) < TINY_NEAR_ZERO) return 0;
c = A[(i + 1) * stride + k] / A[k * stride + k];
for (j = 0; j < n; j++) A[(i + 1) * stride + j] -= c * A[k * stride + j];
b[i + 1] -= c * b[k];
}
}
// Backward substitution
for (i = n - 1; i >= 0; i--) {
if (fabs(A[i * stride + i]) < TINY_NEAR_ZERO) return 0;
c = 0;
for (j = i + 1; j <= n - 1; j++) c += A[i * stride + j] * x[j];
x[i] = (b[i] - c) / A[i * stride + i];
}
return 1;
}
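// Usage sketch (illustrative only, not part of this commit), assuming this
// header is included. Note that linsolve() modifies A and b in place, so pass
// copies if the originals are still needed:
//   double A[4] = { 2, 1, 1, 3 };  // 2x + 1y = 5, 1x + 3y = 10
//   double b[2] = { 5, 10 };
//   double x[2];
//   if (linsolve(2, A, 2, b, x)) printf("x = %f  y = %f\n", x[0], x[1]);
//   // On success x[0] == 1, x[1] == 3.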
////////////////////////////////////////////////////////////////////////////////
// Least-squares
// Solves for the n-dimensional x that minimizes |Ax - b|^2 over the rows of A.
// The solution is x = (A'A)^-1 A'b, i.e. the solution of the normal
// equations A'A x = A'b.
static INLINE int least_squares(int n, double *A, int rows, int stride,
double *b, double *scratch, double *x) {
int i, j, k;
double *scratch_ = NULL;
double *AtA, *Atb;
if (!scratch) {
scratch_ = (double *)aom_malloc(sizeof(*scratch) * n * (n + 1));
if (!scratch_) return 0;
scratch = scratch_;
}
AtA = scratch;
Atb = scratch + n * n;
for (i = 0; i < n; ++i) {
for (j = i; j < n; ++j) {
AtA[i * n + j] = 0.0;
for (k = 0; k < rows; ++k)
AtA[i * n + j] += A[k * stride + i] * A[k * stride + j];
AtA[j * n + i] = AtA[i * n + j];
}
Atb[i] = 0;
for (k = 0; k < rows; ++k) Atb[i] += A[k * stride + i] * b[k];
}
int ret = linsolve(n, AtA, n, Atb, x);
if (scratch_) aom_free(scratch_);
return ret;
}
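// Usage sketch (illustrative only): fitting y ~= c0 + c1 * x to four samples.
// Each sample contributes one row [1, x_k] of A and one entry y_k of b;
// least_squares() forms A'A and A'b internally and calls linsolve(). Passing
// scratch == NULL makes the function allocate its own n * (n + 1) doubles:
//   double A[4 * 2] = { 1, 0, 1, 1, 1, 2, 1, 3 };
//   double b[4] = { 1, 3, 5, 7 };  // exactly y = 1 + 2 * x
//   double c[2];
//   if (least_squares(2, A, 4, 2, b, NULL, c))
//     printf("c0 = %f  c1 = %f\n", c[0], c[1]);  // c0 ~= 1, c1 ~= 2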
// Matrix multiply
static INLINE void multiply_mat(const double *m1, const double *m2, double *res,
const int m1_rows, const int inner_dim,
const int m2_cols) {
double sum;
int row, col, inner;
for (row = 0; row < m1_rows; ++row) {
for (col = 0; col < m2_cols; ++col) {
sum = 0;
for (inner = 0; inner < inner_dim; ++inner)
sum += m1[row * inner_dim + inner] * m2[inner * m2_cols + col];
*(res++) = sum;
}
}
}
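// Usage sketch (illustrative only): res = m1 * m2 with m1 2x3 and m2 3x2,
// all matrices stored row-major:
//   double m1[6] = { 1, 2, 3, 4, 5, 6 };
//   double m2[6] = { 7, 8, 9, 10, 11, 12 };
//   double res[4];
//   multiply_mat(m1, m2, res, 2, 3, 2);  // res = { 58, 64, 139, 154 }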
//
// The functions below are needed only for homography computation and can be
// removed once the homography models are removed.
//
///////////////////////////////////////////////////////////////////////////////
// svdcmp
// Adapted from Numerical Recipes in C
static INLINE double sign(double a, double b) {
return ((b) >= 0 ? fabs(a) : -fabs(a));
}
static INLINE double pythag(double a, double b) {
double ct;
const double absa = fabs(a);
const double absb = fabs(b);
if (absa > absb) {
ct = absb / absa;
return absa * sqrt(1.0 + ct * ct);
} else {
ct = absa / absb;
return (absb == 0) ? 0 : absb * sqrt(1.0 + ct * ct);
}
}
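// Note: pythag(a, b) returns sqrt(a*a + b*b), computed so that the
// intermediate squares do not overflow or underflow, similar in spirit to the
// C99 hypot() function.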
static INLINE int svdcmp(double **u, int m, int n, double w[], double **v) {
const int max_its = 30;
int flag, i, its, j, jj, k, l, nm;
double anorm, c, f, g, h, s, scale, x, y, z;
double *rv1 = (double *)aom_malloc(sizeof(*rv1) * (n + 1));
if (!rv1) return 1;
g = scale = anorm = 0.0;
for (i = 0; i < n; i++) {
l = i + 1;
rv1[i] = scale * g;
g = s = scale = 0.0;
if (i < m) {
for (k = i; k < m; k++) scale += fabs(u[k][i]);
if (scale != 0.) {
for (k = i; k < m; k++) {
u[k][i] /= scale;
s += u[k][i] * u[k][i];
}
f = u[i][i];
g = -sign(sqrt(s), f);
h = f * g - s;
u[i][i] = f - g;
for (j = l; j < n; j++) {
for (s = 0.0, k = i; k < m; k++) s += u[k][i] * u[k][j];
f = s / h;
for (k = i; k < m; k++) u[k][j] += f * u[k][i];
}
for (k = i; k < m; k++) u[k][i] *= scale;
}
}
w[i] = scale * g;
g = s = scale = 0.0;
if (i < m && i != n - 1) {
for (k = l; k < n; k++) scale += fabs(u[i][k]);
if (scale != 0.) {
for (k = l; k < n; k++) {
u[i][k] /= scale;
s += u[i][k] * u[i][k];
}
f = u[i][l];
g = -sign(sqrt(s), f);
h = f * g - s;
u[i][l] = f - g;
for (k = l; k < n; k++) rv1[k] = u[i][k] / h;
for (j = l; j < m; j++) {
for (s = 0.0, k = l; k < n; k++) s += u[j][k] * u[i][k];
for (k = l; k < n; k++) u[j][k] += s * rv1[k];
}
for (k = l; k < n; k++) u[i][k] *= scale;
}
}
anorm = fmax(anorm, (fabs(w[i]) + fabs(rv1[i])));
}
for (i = n - 1; i >= 0; i--) {
if (i < n - 1) {
if (g != 0.) {
for (j = l; j < n; j++) v[j][i] = (u[i][j] / u[i][l]) / g;
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < n; k++) s += u[i][k] * v[k][j];
for (k = l; k < n; k++) v[k][j] += s * v[k][i];
}
}
for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0;
}
v[i][i] = 1.0;
g = rv1[i];
l = i;
}
for (i = AOMMIN(m, n) - 1; i >= 0; i--) {
l = i + 1;
g = w[i];
for (j = l; j < n; j++) u[i][j] = 0.0;
if (g != 0.) {
g = 1.0 / g;
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < m; k++) s += u[k][i] * u[k][j];
f = (s / u[i][i]) * g;
for (k = i; k < m; k++) u[k][j] += f * u[k][i];
}
for (j = i; j < m; j++) u[j][i] *= g;
} else {
for (j = i; j < m; j++) u[j][i] = 0.0;
}
++u[i][i];
}
for (k = n - 1; k >= 0; k--) {
for (its = 0; its < max_its; its++) {
flag = 1;
for (l = k; l >= 0; l--) {
nm = l - 1;
if ((double)(fabs(rv1[l]) + anorm) == anorm || nm < 0) {
flag = 0;
break;
}
if ((double)(fabs(w[nm]) + anorm) == anorm) break;
}
if (flag) {
c = 0.0;
s = 1.0;
for (i = l; i <= k; i++) {
f = s * rv1[i];
rv1[i] = c * rv1[i];
if ((double)(fabs(f) + anorm) == anorm) break;
g = w[i];
h = pythag(f, g);
w[i] = h;
h = 1.0 / h;
c = g * h;
s = -f * h;
for (j = 0; j < m; j++) {
y = u[j][nm];
z = u[j][i];
u[j][nm] = y * c + z * s;
u[j][i] = z * c - y * s;
}
}
}
z = w[k];
if (l == k) {
if (z < 0.0) {
w[k] = -z;
for (j = 0; j < n; j++) v[j][k] = -v[j][k];
}
break;
}
if (its == max_its - 1) {
aom_free(rv1);
return 1;
}
assert(k > 0);
x = w[l];
nm = k - 1;
y = w[nm];
g = rv1[nm];
h = rv1[k];
f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y);
g = pythag(f, 1.0);
f = ((x - z) * (x + z) + h * ((y / (f + sign(g, f))) - h)) / x;
c = s = 1.0;
for (j = l; j <= nm; j++) {
i = j + 1;
g = rv1[i];
y = w[i];
h = s * g;
g = c * g;
z = pythag(f, h);
rv1[j] = z;
c = f / z;
s = h / z;
f = x * c + g * s;
g = g * c - x * s;
h = y * s;
y *= c;
for (jj = 0; jj < n; jj++) {
x = v[jj][j];
z = v[jj][i];
v[jj][j] = x * c + z * s;
v[jj][i] = z * c - x * s;
}
z = pythag(f, h);
w[j] = z;
if (z != 0.) {
z = 1.0 / z;
c = f * z;
s = h * z;
}
f = c * g + s * y;
x = c * y - s * g;
for (jj = 0; jj < m; jj++) {
y = u[jj][j];
z = u[jj][i];
u[jj][j] = y * c + z * s;
u[jj][i] = z * c - y * s;
}
}
rv1[l] = 0.0;
rv1[k] = f;
w[k] = x;
}
}
aom_free(rv1);
return 0;
}
static INLINE int SVD(double *U, double *W, double *V, double *matx, int M,
int N) {
// Assumes allocation for U is MxN
double **nrU = (double **)aom_malloc((M) * sizeof(*nrU));
double **nrV = (double **)aom_malloc((N) * sizeof(*nrV));
int problem, i;
problem = !(nrU && nrV);
if (!problem) {
for (i = 0; i < M; i++) {
nrU[i] = &U[i * N];
}
for (i = 0; i < N; i++) {
nrV[i] = &V[i * N];
}
} else {
if (nrU) aom_free(nrU);
if (nrV) aom_free(nrV);
return 1;
}
/* copy from given matx into nrU */
for (i = 0; i < M; i++) {
memcpy(&(nrU[i][0]), matx + N * i, N * sizeof(*matx));
}
/* Perform the SVD on the row-pointer view of matx */
if (svdcmp(nrU, M, N, W, nrV)) {
aom_free(nrU);
aom_free(nrV);
return 1;
}
/* Free the Numerical Recipes-style row-pointer arrays */
aom_free(nrU);
aom_free(nrV);
return 0;
}
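// Usage sketch (illustrative only): decomposing a small row-major M x N
// matrix. U must hold M * N doubles, W must hold N singular values and V must
// hold N * N doubles; a nonzero return signals failure (allocation failure or
// no convergence):
//   double matx[6] = { 1, 0, 0, 1, 1, 1 };  // 3x2
//   double U[6], W[2], V[4];
//   if (SVD(U, W, V, matx, 3, 2) == 0) {
//     // Now matx == U * diag(W) * V', with U 3x2, V 2x2.
//   }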
@@ -31,6 +31,7 @@
#include "av1/encoder/encoder.h"
#include "av1/encoder/picklpf.h"
#include "av1/encoder/pickrst.h"
#include "av1/encoder/mathutils.h"
// When set to RESTORE_WIENER or RESTORE_SGRPROJ only those are allowed.
// When set to RESTORE_NONE (0) we allow switchable.
@@ -329,8 +330,10 @@ static void search_selfguided_restoration(uint8_t *dat8, int width, int height,
#if CONFIG_HIGHBITDEPTH
}
#endif
aom_clear_system_state();
get_proj_subspace(src8, width, height, src_stride, dat8, dat_stride,
bit_depth, flt1, width, flt2, width, exq);
aom_clear_system_state();
encode_xq(exq, exqd);
err =
get_pixel_proj_error(src8, width, height, src_stride, dat8, dat_stride,
@@ -560,46 +563,6 @@ static void compute_stats_highbd(uint8_t *dgd8, uint8_t *src8, int h_start,
}
#endif // CONFIG_HIGHBITDEPTH
// Solves Ax = b, where x and b are column vectors
static int linsolve(int n, double *A, int stride, double *b, double *x) {
int i, j, k;
double c;
aom_clear_system_state();
// Forward elimination
for (k = 0; k < n - 1; k++) {
// Bring the largest magnitude to the diagonal position
for (i = n - 1; i > k; i--) {
if (fabs(A[(i - 1) * stride + k]) < fabs(A[i * stride + k])) {
for (j = 0; j < n; j++) {
c = A[i * stride + j];
A[i * stride + j] = A[(i - 1) * stride + j];
A[(i - 1) * stride + j] = c;
}
c = b[i];
b[i] = b[i - 1];
b[i - 1] = c;
}
}
for (i = k; i < n - 1; i++) {
if (fabs(A[k * stride + k]) < 1e-10) return 0;
c = A[(i + 1) * stride + k] / A[k * stride + k];
for (j = 0; j < n; j++) A[(i + 1) * stride + j] -= c * A[k * stride + j];
b[i + 1] -= c * b[k];
}
}
// Backward substitution
for (i = n - 1; i >= 0; i--) {
if (fabs(A[i * stride + i]) < 1e-10) return 0;
c = 0;
for (j = i + 1; j <= n - 1; j++) c += A[i * stride + j] * x[j];
x[i] = (b[i] - c) / A[i * stride + i];
}
return 1;
}
static INLINE int wrap_index(int i) {
return (i >= WIENER_HALFWIN1 ? WIENER_WIN - 1 - i : i);
}
@@ -910,6 +873,7 @@ static double search_wiener_uv(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi,
type[tile_idx] = RESTORE_NONE;
continue;
}
aom_clear_system_state();
rsi[plane].restoration_type[tile_idx] = RESTORE_WIENER;
err = try_restoration_tile(src, cpi, rsi, 1 << plane, partial_frame,
@@ -1052,6 +1016,7 @@ static double search_wiener(const YV12_BUFFER_CONFIG *src, AV1_COMP *cpi,
type[tile_idx] = RESTORE_NONE;
continue;
}
aom_clear_system_state();
rsi->restoration_type[tile_idx] = RESTORE_WIENER;
err = try_restoration_tile(src, cpi, rsi, 1, partial_frame, tile_idx, 0, 0,
......
@@ -16,6 +16,7 @@
#include <assert.h>
#include "av1/encoder/ransac.h"
#include "av1/encoder/mathutils.h"
#define MAX_MINPTS 4
#define MAX_DEGENERATE_ITER 10
@@ -132,309 +133,6 @@ static void project_points_double_homography(double *mat, double *points,
}
}
///////////////////////////////////////////////////////////////////////////////
// svdcmp
// Adapted from Numerical Recipes in C
static const double TINY_NEAR_ZERO = 1.0E-12;
static INLINE double sign(double a, double b) {
return ((b) >= 0 ? fabs(a) : -fabs(a));
}
static INLINE double pythag(double a, double b) {
double ct;
const double absa = fabs(a);
const double absb = fabs(b);
if (absa > absb) {
ct = absb / absa;
return absa * sqrt(1.0 + ct * ct);
} else {
ct = absa / absb;
return (absb == 0) ? 0 : absb * sqrt(1.0 + ct * ct);
}
}
static void multiply_mat(const double *m1, const double *m2, double *res,
const int m1_rows, const int inner_dim,
const int m2_cols) {
double sum;
int row, col, inner;
for (row = 0; row < m1_rows; ++row) {
for (col = 0; col < m2_cols; ++col) {
sum = 0;
for (inner = 0; inner < inner_dim; ++inner)
sum += m1[row * inner_dim + inner] * m2[inner * m2_cols + col];
*(res++) = sum;
}
}
}
static int svdcmp(double **u, int m, int n, double w[], double **v) {
const int max_its = 30;
int flag, i, its, j, jj, k, l, nm;
double anorm, c, f, g, h, s, scale, x, y, z;
double *rv1 = (double *)aom_malloc(sizeof(*rv1) * (n + 1));
g = scale = anorm = 0.0;
for (i = 0; i < n; i++) {
l = i + 1;
rv1[i] = scale * g;
g = s = scale = 0.0;
if (i < m) {
for (k = i; k < m; k++) scale += fabs(u[k][i]);
if (scale != 0.) {
for (k = i; k < m; k++) {
u[k][i] /= scale;
s += u[k][i] * u[k][i];
}
f = u[i][i];
g = -sign(sqrt(s), f);
h = f * g - s;
u[i][i] = f - g;
for (j = l; j < n; j++) {
for (s = 0.0, k = i; k < m; k++) s += u[k][i] * u[k][j];
f = s / h;
for (k = i; k < m; k++) u[k][j] += f * u[k][i];
}
for (k = i; k < m; k++) u[k][i] *= scale;
}
}
w[i] = scale * g;
g = s = scale = 0.0;
if (i < m && i != n - 1) {
for (k = l; k < n; k++) scale += fabs(u[i][k]);
if (scale != 0.) {
for (k = l; k < n; k++) {
u[i][k] /= scale;
s += u[i][k] * u[i][k];
}
f = u[i][l];
g = -sign(sqrt(s), f);
h = f * g - s;
u[i][l] = f - g;
for (k = l; k < n; k++) rv1[k] = u[i][k] / h;
for (j = l; j < m; j++) {
for (s = 0.0, k = l; k < n; k++) s += u[j][k] * u[i][k];
for (k = l; k < n; k++) u[j][k] += s * rv1[k];
}
for (k = l; k < n; k++) u[i][k] *= scale;
}
}
anorm = fmax(anorm, (fabs(w[i]) + fabs(rv1[i])));
}
for (i = n - 1; i >= 0; i--) {
if (i < n - 1) {
if (g != 0.) {
for (j = l; j < n; j++) v[j][i] = (u[i][j] / u[i][l]) / g;
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < n; k++) s += u[i][k] * v[k][j];
for (k = l; k < n; k++) v[k][j] += s * v[k][i];
}
}
for (j = l; j < n; j++) v[i][j] = v[j][i] = 0.0;
}
v[i][i] = 1.0;
g = rv1[i];
l = i;
}
for (i = AOMMIN(m, n) - 1; i >= 0; i--) {
l = i + 1;
g = w[i];
for (j = l; j < n; j++) u[i][j] = 0.0;
if (g != 0.) {
g = 1.0 / g;
for (j = l; j < n; j++) {
for (s = 0.0, k = l; k < m; k++) s += u[k][i] * u[k][j];
f = (s / u[i][i]) * g;
for (k = i; k < m; k++) u[k][j] += f * u[k][i];
}
for (j = i; j < m; j++) u[j][i] *= g;
} else {
for (j = i; j < m; j++) u[j][i] = 0.0;
}
++u[i][i];
}
for (k = n - 1; k >= 0; k--) {
for (its = 0; its < max_its; its++) {
flag = 1;