Commit 6697acfb authored by Sebastien Alaiwan's avatar Sebastien Alaiwan Committed by Urvang Joshi

Remove BGSPRITE experiment

This experiment has been abandoned for AV1.

Change-Id: I393e188420450b838fa53e8d7a4a00d41a4a2c6d
parent 6b172bba
......@@ -313,13 +313,6 @@ if (CONFIG_ACCOUNTING)
"${AOM_ROOT}/av1/decoder/accounting.h")
endif ()
if (CONFIG_BGSPRITE)
set(AOM_AV1_ENCODER_SOURCES
${AOM_AV1_ENCODER_SOURCES}
"${AOM_ROOT}/av1/encoder/bgsprite.c"
"${AOM_ROOT}/av1/encoder/bgsprite.h")
endif ()
set(AOM_AV1_ENCODER_SOURCES
${AOM_AV1_ENCODER_SOURCES}
"${AOM_ROOT}/av1/encoder/corner_detect.c"
......
/*
* Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#define _POSIX_C_SOURCE 200112L // rand_r()
#include <assert.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include "av1/encoder/bgsprite.h"
#include "aom_mem/aom_mem.h"
#include "./aom_scale_rtcd.h"
#include "av1/common/mv.h"
#include "av1/common/warped_motion.h"
#include "av1/encoder/encoder.h"
#include "av1/encoder/global_motion.h"
#include "av1/encoder/mathutils.h"
#include "av1/encoder/temporal_filter.h"
/* Blending Modes:
* 0 = Median
* 1 = Mean
*/
#define BGSPRITE_BLENDING_MODE 1
// Enable removal of outliers from mean blending mode.
#if BGSPRITE_BLENDING_MODE == 1
#define BGSPRITE_MEAN_REMOVE_OUTLIERS 0
#endif // BGSPRITE_BLENDING_MODE == 1
/* Interpolation for panorama alignment sampling:
* 0 = Nearest neighbor
* 1 = Bilinear
*/
#define BGSPRITE_INTERPOLATION 0
// Enable turning off bgsprite from firstpass metrics in define_gf_group.
#define BGSPRITE_ENABLE_METRICS 1
// Enable foreground/background segmentation and combine with temporal filter.
#define BGSPRITE_ENABLE_SEGMENTATION 1
// Enable alignment using global motion.
#define BGSPRITE_ENABLE_GME 0
// Block size for foreground mask.
#define BGSPRITE_MASK_BLOCK_SIZE 4
// One resampled pixel sample in the panorama image stack.
// Channels are stored as uint16_t so both 8-bit and high-bitdepth content
// fit in the same struct (8-bit writers store uint8_t-range values).
typedef struct {
  uint16_t y;      // Luma sample.
  uint16_t u;      // Chroma U sample.
  uint16_t v;      // Chroma V sample.
  uint8_t exists;  // Nonzero if a frame contributed a sample here.
} YuvPixel;
// Dual-candidate single-Gaussian model for one block, used for
// foreground/background segmentation (see build_gaussian). Index 0/1 select
// the two competing candidate models; curr_model picks the current
// background candidate.
typedef struct {
  int curr_model;    // Index (0 or 1) of the current background candidate.
  double mean[2];    // Luma mean of each candidate.
  double var[2];     // Luma variance of each candidate.
  int age[2];        // Number of updates absorbed by each candidate.
  double u_mean[2];  // Chroma U mean of each candidate.
  double v_mean[2];  // Chroma V mean of each candidate.
  uint16_t y;        // NOTE(review): final selected luma — set past this
  uint16_t u;        // chunk; presumably the chosen background Y/U/V.
  uint16_t v;
  double final_var;  // Presumably the variance of the selected model.
} YuvPixelGaussian;
// Permutation tables between the 8-entry warped-motion parameter vector and
// the row-major 3x3 matrix (matrix element 8 is the fixed 1 and has no
// parameter). E.g. matrix[0] = params[params_to_matrix_map[0]] = params[2].
static const int params_to_matrix_map[] = { 2, 3, 0, 4, 5, 1, 6, 7 };
static const int matrix_to_params_map[] = { 2, 5, 0, 1, 3, 4, 6, 7 };
// Expands an 8-entry parameter vector into a row-major 3x3 matrix; the
// bottom-right element is always the constant 1.
static void params_to_matrix(const double *const params, double *target) {
  for (int idx = 0; idx < MAX_PARAMDIM - 1; ++idx) {
    const int src = params_to_matrix_map[idx];
    assert(src < MAX_PARAMDIM - 1);
    target[idx] = params[src];
  }
  target[8] = 1;
}
// Collapses a row-major 3x3 matrix back into the 8-entry parameter vector
// (the matrix's fixed bottom-right 1 is dropped).
static void matrix_to_params(const double *const matrix, double *target) {
  for (int idx = 0; idx < MAX_PARAMDIM - 1; ++idx) {
    const int src = matrix_to_params_map[idx];
    assert(src < MAX_PARAMDIM - 1);
    target[idx] = matrix[src];
  }
}
#define TRANSFORM_MAT_DIM 3
// Composes two transforms given in parameter form: target = m2 * m1 (both
// expanded to 3x3 matrices first, then collapsed back to parameters).
static void multiply_params(double *const m1, double *const m2,
                            double *target) {
  double mat_first[MAX_PARAMDIM];
  double mat_second[MAX_PARAMDIM];
  double product[MAX_PARAMDIM];
  params_to_matrix(m1, mat_first);
  params_to_matrix(m2, mat_second);
  // Note the order: m2's matrix is on the left.
  multiply_mat(mat_second, mat_first, product, TRANSFORM_MAT_DIM,
               TRANSFORM_MAT_DIM, TRANSFORM_MAT_DIM);
  matrix_to_params(product, target);
}
// Finds the integer x/y bounding box of a single transformed image by
// mapping its four corners through `transform` and taking floor/ceil of the
// resulting coordinates. Width and height are the size of the input video.
static void find_frame_limit(int width, int height,
                             const double *const transform, int *x_min,
                             int *x_max, int *y_min, int *y_max) {
  double transform_matrix[MAX_PARAMDIM];
  // The four frame corners, in source-pixel coordinates.
  const double corners[4][2] = { { 0, 0 },
                                 { width - 1, 0 },
                                 { width - 1, height - 1 },
                                 { 0, height - 1 } };
  params_to_matrix(transform, transform_matrix);
  // Start from an empty box and grow it corner by corner; this matches the
  // original init-from-first-corner logic exactly.
  *x_max = INT_MIN;
  *x_min = INT_MAX;
  *y_max = INT_MIN;
  *y_min = INT_MAX;
  for (int c = 0; c < 4; ++c) {
    double xy_matrix[3] = { corners[c][0], corners[c][1], 1 };
    double uv_matrix[3] = { 0 };
    multiply_mat(transform_matrix, xy_matrix, uv_matrix, TRANSFORM_MAT_DIM,
                 TRANSFORM_MAT_DIM, 1);
    const int u_ceil = (int)ceil(uv_matrix[0]);
    const int u_floor = (int)floor(uv_matrix[0]);
    const int v_ceil = (int)ceil(uv_matrix[1]);
    const int v_floor = (int)floor(uv_matrix[1]);
    if (u_ceil > *x_max) *x_max = u_ceil;
    if (u_floor < *x_min) *x_min = u_floor;
    if (v_ceil > *y_max) *y_max = v_ceil;
    if (v_floor < *y_min) *y_min = v_floor;
  }
}
// Fills the per-frame bounding-box arrays and computes the overall
// ("panorama") bounding box, i.e. the union of all per-frame boxes.
static void find_limits(int width, int height, const double **const params,
                        int num_frames, int *x_min, int *x_max, int *y_min,
                        int *y_max, int *pano_x_min, int *pano_x_max,
                        int *pano_y_min, int *pano_y_max) {
  // Start with an empty panorama box and grow it per frame.
  *pano_x_max = INT_MIN;
  *pano_x_min = INT_MAX;
  *pano_y_max = INT_MIN;
  *pano_y_min = INT_MAX;
  for (int frame = 0; frame < num_frames; ++frame) {
    find_frame_limit(width, height, (const double *const)params[frame],
                     &x_min[frame], &x_max[frame], &y_min[frame],
                     &y_max[frame]);
    if (*pano_x_max < x_max[frame]) *pano_x_max = x_max[frame];
    if (*pano_x_min > x_min[frame]) *pano_x_min = x_min[frame];
    if (*pano_y_max < y_max[frame]) *pano_y_max = y_max[frame];
    if (*pano_y_min > y_min[frame]) *pano_y_min = y_min[frame];
  }
}
// Inverts a 3x3 transform given in parameter form: expand to a matrix,
// invert via the adjugate (transposed cofactors) over the determinant, and
// collapse back to parameters. Asserts the matrix is non-singular.
static void invert_params(const double *const params, double *target) {
  double m[MAX_PARAMDIM] = { 0 };
  params_to_matrix(params, m);
// 2x2 minor: m[a]*m[b] - m[c]*m[d]. Operand order matches the original
// element-by-element arithmetic exactly (bit-identical results).
#define MINOR(a, b, c, d) ((m[(a)] * m[(b)]) - (m[(c)] * m[(d)]))
  // Determinant by expansion along the first row.
  const double det = m[0] * MINOR(4, 8, 5, 7) - m[1] * MINOR(3, 8, 5, 6) +
                     m[2] * MINOR(3, 7, 4, 6);
  assert(det != 0);
  double inv[MAX_PARAMDIM] = { 0 };
  inv[0] = MINOR(4, 8, 7, 5) / det;
  inv[1] = MINOR(2, 7, 1, 8) / det;
  inv[2] = MINOR(1, 5, 2, 4) / det;
  inv[3] = MINOR(5, 6, 3, 8) / det;
  inv[4] = MINOR(0, 8, 2, 6) / det;
  inv[5] = MINOR(3, 2, 0, 5) / det;
  inv[6] = MINOR(3, 7, 6, 4) / det;
  inv[7] = MINOR(6, 1, 0, 7) / det;
  inv[8] = MINOR(0, 4, 3, 1) / det;
#undef MINOR
  matrix_to_params(inv, target);
}
// Resamples every input frame onto the shared panorama canvas, producing a
// per-pixel stack of samples in img_stack[pano_y][pano_x][frame_index].
// Each frame's transform is inverted so panorama coordinates can be mapped
// back into frame coordinates for sampling (nearest-neighbor by default,
// bilinear when BGSPRITE_INTERPOLATION == 1).
//
// frames:      num_frames input buffers (all assumed the same dimensions).
// params:      per-frame 8-parameter transforms (frame -> panorama space).
// x_min..y_max: per-frame transformed bounding boxes (from find_limits).
// pano_x_min/pano_y_min: overall panorama minimums; negated to offset all
//                        panorama coordinates so they start at 0.
static void build_image_stack(YV12_BUFFER_CONFIG **const frames,
                              const int num_frames, const double **const params,
                              const int *const x_min, const int *const x_max,
                              const int *const y_min, const int *const y_max,
                              int pano_x_min, int pano_y_min,
                              YuvPixel ***img_stack) {
  // Re-sample images onto panorama (pre-filtering).
  const int x_offset = -pano_x_min;
  const int y_offset = -pano_y_min;
  const int frame_width = frames[0]->y_width;
  const int frame_height = frames[0]->y_height;
  for (int i = 0; i < num_frames; ++i) {
    // Find transforms from panorama coordinate system back to single image
    // coordinate system for sampling.
    int transformed_width = x_max[i] - x_min[i] + 1;
    int transformed_height = y_max[i] - y_min[i] + 1;
    double transform_matrix[MAX_PARAMDIM];
    double transform_params[MAX_PARAMDIM - 1];
    invert_params(params[i], transform_params);
    params_to_matrix(transform_params, transform_matrix);
    // High-bitdepth planes are uint16_t stored behind the uint8_t pointers;
    // only dereferenced when YV12_FLAG_HIGHBITDEPTH is set below.
    const uint16_t *y_buffer16 = CONVERT_TO_SHORTPTR(frames[i]->y_buffer);
    const uint16_t *u_buffer16 = CONVERT_TO_SHORTPTR(frames[i]->u_buffer);
    const uint16_t *v_buffer16 = CONVERT_TO_SHORTPTR(frames[i]->v_buffer);
    for (int y = 0; y < transformed_height; ++y) {
      for (int x = 0; x < transformed_width; ++x) {
        // Do transform: map this panorama-space point into frame space.
        double xy_matrix[3] = { x + x_min[i], y + y_min[i], 1 };
        double uv_matrix[3] = { 0 };
        multiply_mat(transform_matrix, xy_matrix, uv_matrix, TRANSFORM_MAT_DIM,
                     TRANSFORM_MAT_DIM, 1);
        // Coordinates used for nearest neighbor interpolation.
        int image_x = (int)round(uv_matrix[0]);
        int image_y = (int)round(uv_matrix[1]);
        // Temporary values for bilinear interpolation.
        double interpolated_yvalue = 0.0;
        double interpolated_uvalue = 0.0;
        double interpolated_vvalue = 0.0;
        double interpolated_fraction = 0.0;
        int interpolation_count = 0;
#if BGSPRITE_INTERPOLATION == 1
        // Coordinates used for bilinear interpolation.
        double x_base;
        double y_base;
        double x_decimal = modf(uv_matrix[0], &x_base);
        double y_decimal = modf(uv_matrix[1], &y_base);
        // Only interpolate when the sample point is far enough from a pixel
        // center; otherwise fall back to nearest neighbor below.
        if ((x_decimal > 0.2 && x_decimal < 0.8) ||
            (y_decimal > 0.2 && y_decimal < 0.8)) {
          // Accumulate the 2x2 neighborhood, weighted by distance; taps that
          // fall outside the frame are skipped and tracked via
          // interpolated_fraction for later renormalization.
          for (int u = 0; u < 2; ++u) {
            for (int v = 0; v < 2; ++v) {
              int interp_x = (int)x_base + u;
              int interp_y = (int)y_base + v;
              if (interp_x >= 0 && interp_x < frame_width && interp_y >= 0 &&
                  interp_y < frame_height) {
                interpolation_count++;
                interpolated_fraction +=
                    fabs(u - x_decimal) * fabs(v - y_decimal);
                int ychannel_idx = interp_y * frames[i]->y_stride + interp_x;
                int uvchannel_idx = (interp_y >> frames[i]->subsampling_y) *
                                        frames[i]->uv_stride +
                                    (interp_x >> frames[i]->subsampling_x);
                if (frames[i]->flags & YV12_FLAG_HIGHBITDEPTH) {
                  interpolated_yvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         y_buffer16[ychannel_idx];
                  interpolated_uvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         u_buffer16[uvchannel_idx];
                  interpolated_vvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         v_buffer16[uvchannel_idx];
                } else {
                  interpolated_yvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         frames[i]->y_buffer[ychannel_idx];
                  interpolated_uvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         frames[i]->u_buffer[uvchannel_idx];
                  interpolated_vvalue += (1 - fabs(u - x_decimal)) *
                                         (1 - fabs(v - y_decimal)) *
                                         frames[i]->v_buffer[uvchannel_idx];
                }
              }
            }
          }
        }
#endif  // BGSPRITE_INTERPOLATION == 1
        // Use the bilinear result only when at least 3 of the 4 taps landed
        // inside the frame; otherwise fall through to nearest neighbor.
        if (BGSPRITE_INTERPOLATION && interpolation_count > 2) {
          if (interpolation_count != 4) {
            // Renormalize for the missing tap(s).
            interpolated_yvalue /= interpolated_fraction;
            interpolated_uvalue /= interpolated_fraction;
            interpolated_vvalue /= interpolated_fraction;
          }
          int pano_x = x + x_min[i] + x_offset;
          int pano_y = y + y_min[i] + y_offset;
          if (frames[i]->flags & YV12_FLAG_HIGHBITDEPTH) {
            img_stack[pano_y][pano_x][i].y = (uint16_t)interpolated_yvalue;
            img_stack[pano_y][pano_x][i].u = (uint16_t)interpolated_uvalue;
            img_stack[pano_y][pano_x][i].v = (uint16_t)interpolated_vvalue;
            img_stack[pano_y][pano_x][i].exists = 1;
          } else {
            img_stack[pano_y][pano_x][i].y = (uint8_t)interpolated_yvalue;
            img_stack[pano_y][pano_x][i].u = (uint8_t)interpolated_uvalue;
            img_stack[pano_y][pano_x][i].v = (uint8_t)interpolated_vvalue;
            img_stack[pano_y][pano_x][i].exists = 1;
          }
        } else if (image_x >= 0 && image_x < frame_width && image_y >= 0 &&
                   image_y < frame_height) {
          // Place in panorama stack (nearest-neighbor sample).
          int pano_x = x + x_min[i] + x_offset;
          int pano_y = y + y_min[i] + y_offset;
          int ychannel_idx = image_y * frames[i]->y_stride + image_x;
          int uvchannel_idx =
              (image_y >> frames[i]->subsampling_y) * frames[i]->uv_stride +
              (image_x >> frames[i]->subsampling_x);
          if (frames[i]->flags & YV12_FLAG_HIGHBITDEPTH) {
            img_stack[pano_y][pano_x][i].y = y_buffer16[ychannel_idx];
            img_stack[pano_y][pano_x][i].u = u_buffer16[uvchannel_idx];
            img_stack[pano_y][pano_x][i].v = v_buffer16[uvchannel_idx];
            img_stack[pano_y][pano_x][i].exists = 1;
          } else {
            img_stack[pano_y][pano_x][i].y = frames[i]->y_buffer[ychannel_idx];
            img_stack[pano_y][pano_x][i].u = frames[i]->u_buffer[uvchannel_idx];
            img_stack[pano_y][pano_x][i].v = frames[i]->v_buffer[uvchannel_idx];
            img_stack[pano_y][pano_x][i].exists = 1;
          }
        }
      }
    }
  }
}
#if BGSPRITE_BLENDING_MODE == 0
// Exchanges the contents of two YuvPixels.
static void swap_yuv(YuvPixel *a, YuvPixel *b) {
  const YuvPixel tmp = *a;
  *a = *b;
  *b = tmp;
}
// Lomuto partition of arr[left..right] around arr[pivot_idx], ordered by
// luma. Returns the pivot's final resting index; used by qselect.
static int partition(YuvPixel arr[], int left, int right, int pivot_idx) {
  const YuvPixel pivot = arr[pivot_idx];
  // Park the pivot at the end while scanning.
  swap_yuv(&arr[pivot_idx], &arr[right]);
  int store_idx = left;
  for (int scan = left; scan < right; ++scan) {
    if (arr[scan].y <= pivot.y) {
      swap_yuv(&arr[scan], &arr[store_idx]);
      ++store_idx;
    }
  }
  // Move the pivot into its final position.
  swap_yuv(&arr[store_idx], &arr[right]);
  return store_idx;
}
// Returns the kth element (ordered by luma) in arr[left..right], partially
// sorting the array in place (iterative quickselect).
//
// Fix: the original seeded rand_r() with time(NULL) on EVERY call, so the
// "random" pivot was constant within any one second — defeating the
// randomization entirely while paying a time() syscall per invocation.
// A deterministic midpoint pivot gives the same selected element with the
// usual quickselect expected O(n) behavior and no syscall overhead. The
// recursion is also flattened to a loop (both calls were tail calls).
static YuvPixel qselect(YuvPixel arr[], int left, int right, int k) {
  while (left < right) {
    // Midpoint pivot, computed overflow-safely.
    int pivot_idx = left + ((right - left) >> 1);
    pivot_idx = partition(arr, left, right, pivot_idx);
    if (k == pivot_idx) {
      return arr[k];
    } else if (k < pivot_idx) {
      right = pivot_idx - 1;
    } else {
      left = pivot_idx + 1;
    }
  }
  return arr[left];
}
// Blends the image stack into blended_img using a per-pixel temporal median:
// the sample whose luma is the median among all frames covering the pixel is
// copied whole (Y, U and V together). Pixels no frame covers are marked
// nonexistent.
//
// Fix: the aom_calloc() result was used unchecked, dereferencing NULL on
// allocation failure. On failure we now mark the whole output empty and
// return, so callers never see stale/garbage pixels.
static void blend_median(const int width, const int height,
                         const int num_frames, const YuvPixel ***image_stack,
                         YuvPixel **blended_img) {
  // Scratch array holding the existing samples at one pixel location.
  YuvPixel *pixel_stack = aom_calloc(num_frames, sizeof(*pixel_stack));
  if (pixel_stack == NULL) {
    for (int y = 0; y < height; ++y) {
      for (int x = 0; x < width; ++x) {
        blended_img[y][x].exists = 0;
      }
    }
    return;
  }
  // Apply median filtering using quickselect.
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      int count = 0;
      for (int i = 0; i < num_frames; ++i) {
        if (image_stack[y][x][i].exists) {
          pixel_stack[count] = image_stack[y][x][i];
          ++count;
        }
      }
      if (count == 0) {
        // Just make the pixel black.
        // TODO(toddnguyen): Color the pixel with nearest neighbor
        blended_img[y][x].exists = 0;
      } else {
        // Integer division already floors; the original wrapped it in a
        // redundant (int)floor().
        const int median_idx = count / 2;
        YuvPixel median = qselect(pixel_stack, 0, count - 1, median_idx);
        // Make the median value the 0th index for UV subsampling later
        blended_img[y][x] = median;
        blended_img[y][x].exists = 1;
      }
    }
  }
  aom_free(pixel_stack);
}
#endif // BGSPRITE_BLENDING_MODE == 0
#if BGSPRITE_BLENDING_MODE == 1
// Blends the image stack into blended_img using a per-pixel temporal mean of
// every frame covering the pixel. With BGSPRITE_MEAN_REMOVE_OUTLIERS enabled,
// samples more than 1.5 luma standard deviations from the mean are dropped
// before re-averaging. Pixels no frame covers are marked nonexistent.
// highbitdepth selects uint16_t vs uint8_t truncation of the averages.
static void blend_mean(const int width, const int height, const int num_frames,
                       const YuvPixel ***image_stack, YuvPixel **blended_img,
                       int highbitdepth) {
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      // Accumulate Y/U/V sums over every frame that covers this pixel.
      uint32_t y_sum = 0;
      uint32_t u_sum = 0;
      uint32_t v_sum = 0;
      uint32_t count = 0;
      for (int i = 0; i < num_frames; ++i) {
        if (image_stack[y][x][i].exists) {
          y_sum += image_stack[y][x][i].y;
          u_sum += image_stack[y][x][i].u;
          v_sum += image_stack[y][x][i].v;
          ++count;
        }
      }
#if BGSPRITE_MEAN_REMOVE_OUTLIERS
      if (count > 1) {
        // Compute the luma standard deviation, then re-accumulate using only
        // samples within 1.5 stdev of the mean.
        double stdev = 0;
        double y_mean = (double)y_sum / count;
        for (int i = 0; i < num_frames; ++i) {
          if (image_stack[y][x][i].exists) {
            stdev += pow(y_mean - image_stack[y][x][i].y, 2);
          }
        }
        stdev = sqrt(stdev / count);
        uint32_t inlier_y_sum = 0;
        uint32_t inlier_u_sum = 0;
        uint32_t inlier_v_sum = 0;
        uint32_t inlier_count = 0;
        for (int i = 0; i < num_frames; ++i) {
          if (image_stack[y][x][i].exists &&
              fabs(image_stack[y][x][i].y - y_mean) <= 1.5 * stdev) {
            inlier_y_sum += image_stack[y][x][i].y;
            inlier_u_sum += image_stack[y][x][i].u;
            inlier_v_sum += image_stack[y][x][i].v;
            ++inlier_count;
          }
        }
        count = inlier_count;
        y_sum = inlier_y_sum;
        u_sum = inlier_u_sum;
        v_sum = inlier_v_sum;
      }
#endif  // BGSPRITE_MEAN_REMOVE_OUTLIERS
      if (count != 0) {
        blended_img[y][x].exists = 1;
        // OD_DIVU: project division macro — presumably an optimized unsigned
        // divide (see odintrin); confirm against its definition.
        if (highbitdepth) {
          blended_img[y][x].y = (uint16_t)OD_DIVU(y_sum, count);
          blended_img[y][x].u = (uint16_t)OD_DIVU(u_sum, count);
          blended_img[y][x].v = (uint16_t)OD_DIVU(v_sum, count);
        } else {
          (void)highbitdepth;
          blended_img[y][x].y = (uint8_t)OD_DIVU(y_sum, count);
          blended_img[y][x].u = (uint8_t)OD_DIVU(u_sum, count);
          blended_img[y][x].v = (uint8_t)OD_DIVU(v_sum, count);
        }
      } else {
        // No frame covered this pixel.
        blended_img[y][x].exists = 0;
      }
    }
  }
}
#endif // BGSPRITE_BLENDING_MODE == 1
#if BGSPRITE_ENABLE_SEGMENTATION
// Builds dual-mode single gaussian model from image stack.
static void build_gaussian(const YuvPixel ***image_stack, const int num_frames,
const int width, const int height,
const int x_block_width, const int y_block_height,
const int block_size, YuvPixelGaussian **gauss) {
const double initial_variance = 10.0;
const double s_theta = 2.0;
// Add images to dual-mode single gaussian model
for (int y_block = 0; y_block < y_block_height; ++y_block) {
for (int x_block = 0; x_block < x_block_width; ++x_block) {
// Process all blocks.
YuvPixelGaussian *model = &gauss[y_block][x_block];
// Process all frames.
for (int i = 0; i < num_frames; ++i) {
// Add block to the Gaussian model.
double max_variance[2] = { 0.0, 0.0 };
double temp_y_mean = 0.0;
double temp_u_mean = 0.0;
double temp_v_mean = 0.0;
// Find mean/variance of a block of pixels.
int temp_count = 0;
for (int sub_y = 0; sub_y < block_size; ++sub_y) {
for (int sub_x = 0; sub_x < block_size; ++sub_x) {
const int y = y_block * block_size + sub_y;
const int x = x_block * block_size + sub_x;
if (y < height && x < width && image_stack[y][x][i].exists) {
++temp_count;
temp_y_mean += (double)image_stack[y][x][i].y;
temp_u_mean += (double)image_stack[y][x][i].u;
temp_v_mean += (double)image_stack[y][x][i].v;
const double variance_0 =
pow((double)image_stack[y][x][i].y - model->mean[0], 2);
const double variance_1 =
pow((double)image_stack[y][x][i].y - model->mean[1], 2);
if (variance_0 > max_variance[0]) {
max_variance[0] = variance_0;
}
if (variance_1 > max_variance[1]) {
max_variance[1] = variance_1;
}
}
}
}
// If pixels exist in the block, add to the model.
if (temp_count > 0) {
assert(temp_count <= block_size * block_size);
temp_y_mean /= temp_count;
temp_u_mean /= temp_count;
temp_v_mean /= temp_count;
// Switch the background model to the oldest model.
if (model->age[0] > model->age[1]) {
model->curr_model = 0;
} else if (model->age[1] > model->age[0]) {
model->curr_model = 1;
}
// If model is empty, initialize model.
if (model->age[model->curr_model] == 0) {
model->mean[model->curr_model] = temp_y_mean;
model->u_mean[model->curr_model] = temp_u_mean;
model->v_mean[model->curr_model] = temp_v_mean;
model->var[model->curr_model] = initial_variance;
model->age[model->curr_model] = 1;
} else {
// Constants for current model and foreground model (0 or 1).
const int opposite = 1 - model->curr_model;
const int current = model->curr_model;
const double j = i;
// Put block into the appropriate model.
if (pow(temp_y_mean - model->mean[current], 2) <
s_theta * model->var[current]) {
// Add block to the current background model
model->age[current] += 1;
const double prev_weight = 1 / j;
const double curr_weight = (j - 1) / j;
model->mean[current] = prev_weight * model->mean[current] +
curr_weight * temp_y_mean;
model->u_mean[current] = prev_weight * model->u_mean[current] +
curr_weight * temp_u_mean;
model->v_mean[current] = prev_weight * model->v_mean[current] +
curr_weight * temp_v_mean;
model->var[current] = prev_weight * model->var[current] +
curr_weight * max_variance[current];
} else {
// Block does not fit into current background candidate. Add to
// foreground candidate and reinitialize if necessary.
const double var_fg = pow(temp_y_mean - model->mean[opposite], 2);
if (var_fg <= s_theta * model->var[opposite]) {
model->age[opposite] += 1;
const double prev_weight = 1 / j;
const double curr_weight = (j - 1) / j;
model->mean[opposite] = prev_weight * model->mean[opposite] +
curr_weight * temp_y_mean;
model->u_mean[opposite] =
prev_weight * model->u_mean[opposite] +
curr_weight * temp_u_mean;
model->v_mean[opposite] =
prev_weight * model->v_mean[opposite] +
curr_weight * temp_v_mean;
model->var[opposite] = prev_weight * model->var[opposite] +
curr_weight * max_variance[opposite];
} else if (model->age[opposite] == 0 ||
var_fg > s_theta * model->var[opposite]) {
model->mean[opposite] = temp_y_mean;
model->u_mean[opposite] = temp_u_mean;
model->v_mean[opposite] = temp_v_mean;
model->var[opposite] = initial_variance;
model->age[opposite] = 1;
} else {
// This case should never happen.