Commit 83a91e78 authored by John Koleszar

RTCD: add variance functions

This commit continues the process of converting to the new RTCD
system.

Change-Id: Ie5c1aa480637e98dc3918fb562ff45c37a66c538
parent f103dcef
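The change swaps the encoder's per-instance variance vtable (the cpi->rtcd.variance fields reached through VARIANCE_INVOKE) for plain calls such as vp8_variance16x16 or vp8_sad16x16, which the generated vpx_rtcd.h resolves to the best implementation for the running CPU. The following is a minimal sketch of that dispatch pattern, not code from this change: the variance_fn_t typedef, rtcd_init(), and the _c/_simd functions are made-up stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in signature for a variance kernel (illustrative, not libvpx's exact typedef). */
    typedef unsigned int (*variance_fn_t)(const unsigned char *src, int src_stride,
                                          const unsigned char *ref, int ref_stride,
                                          unsigned int *sse);

    /* Portable C reference: variance = SSE - sum^2 / N over a 16x16 block. */
    static unsigned int variance16x16_c(const unsigned char *src, int src_stride,
                                        const unsigned char *ref, int ref_stride,
                                        unsigned int *sse)
    {
        int64_t sum = 0;
        unsigned int sq = 0;
        for (int r = 0; r < 16; ++r)
            for (int c = 0; c < 16; ++c)
            {
                int d = src[r * src_stride + c] - ref[r * ref_stride + c];
                sum += d;
                sq += (unsigned int)(d * d);
            }
        *sse = sq;
        return sq - (unsigned int)((sum * sum) >> 8);   /* 16*16 pixels = 1 << 8 */
    }

    /* In a real build this would be a NEON/SSE2 kernel; here it just forwards. */
    static unsigned int variance16x16_simd(const unsigned char *src, int src_stride,
                                           const unsigned char *ref, int ref_stride,
                                           unsigned int *sse)
    {
        return variance16x16_c(src, src_stride, ref, ref_stride, sse);
    }

    /* Old style: a vtable hanging off the encoder instance, filled per-CPU at init
     * and dereferenced at every call site through an INVOKE macro. */
    struct variance_vtable { variance_fn_t var16x16; };
    #define VARIANCE_INVOKE(ctx, fn) ((ctx)->fn)

    /* New RTCD style: one global pointer per public name, assigned once by an
     * rtcd initializer, so call sites simply write variance16x16(...). */
    static variance_fn_t variance16x16;
    static void rtcd_init(int have_simd)
    {
        variance16x16 = have_simd ? variance16x16_simd : variance16x16_c;
    }

    int main(void)
    {
        unsigned char a[16 * 16] = { 0 }, b[16 * 16];
        unsigned int sse;
        for (int i = 0; i < 16 * 16; ++i) b[i] = (unsigned char)(i & 7);

        struct variance_vtable vt = { variance16x16_c };           /* old dispatch */
        unsigned int v_old = VARIANCE_INVOKE(&vt, var16x16)(a, 16, b, 16, &sse);

        rtcd_init(0);                                              /* new dispatch */
        unsigned int v_new = variance16x16(a, 16, b, 16, &sse);

        printf("old=%u new=%u sse=%u\n", v_old, v_new, sse);
        return 0;
    }

When runtime CPU detection is disabled, the generated header can simply #define the public name straight to one implementation, which is the effect the hand-written !CONFIG_RUNTIME_CPU_DETECT blocks in the old ARM header (removed further down) achieved.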
......@@ -728,18 +728,18 @@ static void multiframe_quality_enhance_block
unsigned int act, sse, sad, thr;
if (blksize == 16)
{
- act = (vp8_variance_var16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
- sad = (vp8_variance_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
+ act = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+ sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
}
else if (blksize == 8)
{
- act = (vp8_variance_var8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
- sad = (vp8_variance_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
+ act = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+ sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
}
else
{
- act = (vp8_variance_var4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
- sad = (vp8_variance_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
+ act = (vp8_variance4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
+ sad = (vp8_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
}
/* thr = qdiff/8 + log2(act) + log4(qprev) */
thr = (qdiff>>3);
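In the hunk above, both the variance ("act") and the SAD are reduced to rounded per-pixel averages before being compared: +128 >> 8 divides by the 256 pixels of a 16x16 block, +32 >> 6 by the 64 pixels of an 8x8 block, and +8 >> 4 by the 16 pixels of a 4x4 block. A tiny sketch of that rounding, with a hypothetical per_pixel() helper (the diff simply inlines the shifts):

    #include <stdio.h>

    /* Hypothetical helper mirroring the inlined shifts above: a rounded per-pixel
     * average, (x + N/2) >> log2(N), where N is the pixel count of the block. */
    static unsigned int per_pixel(unsigned int x, int log2_npix)
    {
        return (x + (1u << (log2_npix - 1))) >> log2_npix;
    }

    int main(void)
    {
        /* blksize 16 -> 256 px (shift 8), 8 -> 64 px (shift 6), 4 -> 16 px (shift 4) */
        printf("%u %u %u\n", per_pixel(1000, 8), per_pixel(1000, 6), per_pixel(1000, 4));
        return 0;
    }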
......
......@@ -32,32 +32,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
#if HAVE_MEDIA
if (flags & HAS_MEDIA)
{
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
- /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
- /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
- /*cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
......@@ -79,32 +53,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
#if HAVE_NEON
if (flags & HAS_NEON)
{
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
- /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
- /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
- /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
- /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
- cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
......
......@@ -9,6 +9,7 @@
*/
#include "vpx_config.h"
#include "vpx_rtcd.h"
#include "vp8/encoder/variance.h"
#include "vp8/common/filter.h"
......
- /*
- * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
- #ifndef VARIANCE_ARM_H
- #define VARIANCE_ARM_H
- #if HAVE_MEDIA
- extern prototype_sad(vp8_sad16x16_armv6);
- extern prototype_variance(vp8_variance16x16_armv6);
- extern prototype_variance(vp8_variance8x8_armv6);
- extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
- extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
- extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
- extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
- extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
- extern prototype_variance(vp8_mse16x16_armv6);
- #if !CONFIG_RUNTIME_CPU_DETECT
- #undef vp8_variance_sad16x16
- #define vp8_variance_sad16x16 vp8_sad16x16_armv6
- #undef vp8_variance_subpixvar16x16
- #define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
- #undef vp8_variance_subpixvar8x8
- #define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
- #undef vp8_variance_var16x16
- #define vp8_variance_var16x16 vp8_variance16x16_armv6
- #undef vp8_variance_mse16x16
- #define vp8_variance_mse16x16 vp8_mse16x16_armv6
- #undef vp8_variance_var8x8
- #define vp8_variance_var8x8 vp8_variance8x8_armv6
- #undef vp8_variance_halfpixvar16x16_h
- #define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
- #undef vp8_variance_halfpixvar16x16_v
- #define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
- #undef vp8_variance_halfpixvar16x16_hv
- #define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
- #endif /* !CONFIG_RUNTIME_CPU_DETECT */
- #endif /* HAVE_MEDIA */
- #if HAVE_NEON
- extern prototype_sad(vp8_sad4x4_neon);
- extern prototype_sad(vp8_sad8x8_neon);
- extern prototype_sad(vp8_sad8x16_neon);
- extern prototype_sad(vp8_sad16x8_neon);
- extern prototype_sad(vp8_sad16x16_neon);
- //extern prototype_variance(vp8_variance4x4_c);
- extern prototype_variance(vp8_variance8x8_neon);
- extern prototype_variance(vp8_variance8x16_neon);
- extern prototype_variance(vp8_variance16x8_neon);
- extern prototype_variance(vp8_variance16x16_neon);
- //extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
- extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
- //extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
- //extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
- extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
- extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
- extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
- extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
- extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
- //extern prototype_getmbss(vp8_get_mb_ss_c);
- extern prototype_variance(vp8_mse16x16_neon);
- extern prototype_get16x16prederror(vp8_get4x4sse_cs_neon);
- #if !CONFIG_RUNTIME_CPU_DETECT
- #undef vp8_variance_sad4x4
- #define vp8_variance_sad4x4 vp8_sad4x4_neon
- #undef vp8_variance_sad8x8
- #define vp8_variance_sad8x8 vp8_sad8x8_neon
- #undef vp8_variance_sad8x16
- #define vp8_variance_sad8x16 vp8_sad8x16_neon
- #undef vp8_variance_sad16x8
- #define vp8_variance_sad16x8 vp8_sad16x8_neon
- #undef vp8_variance_sad16x16
- #define vp8_variance_sad16x16 vp8_sad16x16_neon
- //#undef vp8_variance_var4x4
- //#define vp8_variance_var4x4 vp8_variance4x4_c
- #undef vp8_variance_var8x8
- #define vp8_variance_var8x8 vp8_variance8x8_neon
- #undef vp8_variance_var8x16
- #define vp8_variance_var8x16 vp8_variance8x16_neon
- #undef vp8_variance_var16x8
- #define vp8_variance_var16x8 vp8_variance16x8_neon
- #undef vp8_variance_var16x16
- #define vp8_variance_var16x16 vp8_variance16x16_neon
- //#undef vp8_variance_subpixvar4x4
- //#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
- #undef vp8_variance_subpixvar8x8
- #define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
- //#undef vp8_variance_subpixvar8x16
- //#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
- //#undef vp8_variance_subpixvar16x8
- //#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
- #undef vp8_variance_subpixvar16x16
- #define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
- #undef vp8_variance_halfpixvar16x16_h
- #define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_neon
- #undef vp8_variance_halfpixvar16x16_v
- #define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_neon
- #undef vp8_variance_halfpixvar16x16_hv
- #define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
- //#undef vp8_variance_getmbss
- //#define vp8_variance_getmbss vp8_get_mb_ss_c
- #undef vp8_variance_mse16x16
- #define vp8_variance_mse16x16 vp8_mse16x16_neon
- #undef vp8_variance_get4x4sse_cs
- #define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_neon
- #endif /* !CONFIG_RUNTIME_CPU_DETECT */
- #endif /* HAVE_NEON */
- #endif
......@@ -96,7 +96,7 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
- act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+ act = vp8_variance16x16(x->src.y_buffer,
x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
act = act<<4;
......
......@@ -53,7 +53,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
}
}
- intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+ intra_pred_var = vp8_get_mb_ss(x->src_diff);
return intra_pred_var;
}
......
......@@ -409,7 +409,7 @@ static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG
ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
- VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+ vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
}
static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
......@@ -433,7 +433,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
int new_mv_mode_penalty = 256;
// override the default variance function to use MSE
- v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+ v_fn_ptr.vf = vp8_mse16x16;
// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
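The hunk above keeps the existing first-pass trick of overriding the variance function pointer so motion errors are scored with vp8_mse16x16 (plain sum of squared differences) instead of true variance; only the way the MSE function is named changes. A rough sketch of that override pattern, with a cut-down stand-in for the real vp8_variance_fn_ptr_t (which carries several more members):

    #include <stdio.h>

    /* Cut-down stand-in for the encoder's variance function pointer table. */
    typedef unsigned int (*vf_t)(const unsigned char *src, int src_stride,
                                 const unsigned char *ref, int ref_stride,
                                 unsigned int *sse);
    struct fn_ptr { vf_t vf; };

    /* Toy metrics: "variance" subtracts the squared-mean term, "MSE" is plain SSE. */
    static unsigned int toy_variance(const unsigned char *s, int sp,
                                     const unsigned char *r, int rp, unsigned int *sse)
    { (void)s; (void)sp; (void)r; (void)rp; *sse = 100; return 90; }

    static unsigned int toy_mse(const unsigned char *s, int sp,
                                const unsigned char *r, int rp, unsigned int *sse)
    { (void)s; (void)sp; (void)r; (void)rp; *sse = 100; return 100; }

    int main(void)
    {
        unsigned int sse;
        struct fn_ptr v_fn_ptr = { toy_variance };  /* normal mode-decision metric */
        v_fn_ptr.vf = toy_mse;                      /* first pass swaps in MSE */
        printf("%u\n", v_fn_ptr.vf(NULL, 0, NULL, 0, &sse));
        return 0;
    }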
......
......@@ -25,53 +25,6 @@ extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
void vp8_cmachine_specific_config(VP8_COMP *cpi)
{
#if CONFIG_RUNTIME_CPU_DETECT
- cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
- cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
- cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
- cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
- cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
- cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
- cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
- cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
- cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
- cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
- cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
- cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
- cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
- cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
- cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
- cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
- cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
- cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
- cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
- cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
- #if ARCH_X86 || ARCH_X86_64
- cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
- #endif
- cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
- cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
- cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
- cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
- cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
- cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
- cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
- cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
- cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
- cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
- cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
- cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
- cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
- cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
- cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
- cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
- cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
......@@ -95,10 +48,6 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
#if !(CONFIG_REALTIME_ONLY)
cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
#endif
- #if CONFIG_INTERNAL_STATS
- cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
- cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
- #endif
#endif
// Pure C:
......
......@@ -224,7 +224,6 @@ typedef struct
typedef struct VP8_ENCODER_RTCD
{
- vp8_variance_rtcd_vtable_t variance;
vp8_fdct_rtcd_vtable_t fdct;
vp8_encodemb_rtcd_vtable_t encodemb;
vp8_quantize_rtcd_vtable_t quantize;
......
......@@ -31,7 +31,7 @@
#define IF_RTCD(x) NULL
#endif
- extern int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd);
+ extern int VP8_UVSSE(MACROBLOCK *x);
#ifdef SPEEDSTATS
extern unsigned int cnt_pm;
......@@ -40,7 +40,6 @@ extern unsigned int cnt_pm;
extern const int vp8_ref_frame_order[MAX_MODES];
extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES];
- extern unsigned int (*vp8_get4x4sse_cs)(unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr, int recon_stride);
extern int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]);
......@@ -120,14 +119,14 @@ unsigned int vp8_get4x4sse_cs_c
return distortion;
}
- static int get_prediction_error(BLOCK *be, BLOCKD *b, const vp8_variance_rtcd_vtable_t *rtcd)
+ static int get_prediction_error(BLOCK *be, BLOCKD *b)
{
unsigned char *sptr;
unsigned char *dptr;
sptr = (*(be->base_src) + be->src);
dptr = b->predictor;
- return VARIANCE_INVOKE(rtcd, get4x4sse_cs)(sptr, be->src_stride, dptr, 16);
+ return vp8_get4x4sse_cs(sptr, be->src_stride, dptr, 16);
}
......@@ -157,7 +156,7 @@ static int pick_intra4x4block(
vp8_intra4x4_predict
(*(b->base_dst) + b->dst, b->dst_stride,
mode, b->predictor, 16);
- distortion = get_prediction_error(be, b, &rtcd->variance);
+ distortion = get_prediction_error(be, b);
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
if (this_rd < best_rd)
......@@ -674,8 +673,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
else
{
rate2 += rate;
- distortion2 = VARIANCE_INVOKE
- (&cpi->rtcd.variance, var16x16)(
+ distortion2 = vp8_variance16x16(
*(b->base_src), b->src_stride,
x->e_mbd.predictor, 16, &sse);
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
......@@ -700,7 +698,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
case TM_PRED:
vp8_build_intra_predictors_mby
(&x->e_mbd);
- distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+ distortion2 = vp8_variance16x16
(*(b->base_src), b->src_stride,
x->e_mbd.predictor, 16, &sse);
rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode];
......@@ -937,7 +935,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
// Check u and v to make sure skip is ok
int sse2 = 0;
- sse2 = VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+ sse2 = VP8_UVSSE(x);
if (sse2 * 2 < x->encode_breakout)
x->skip = 1;
......@@ -1071,7 +1069,7 @@ void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
x->e_mbd.mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby
(&x->e_mbd);
- distortion = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+ distortion = vp8_variance16x16
(*(b->base_src), b->src_stride, x->e_mbd.predictor, 16, &sse);
rate = x->mbmode_cost[x->e_mbd.frame_type][mode];
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
......
......@@ -21,7 +21,7 @@
#include "vpx_ports/arm.h"
#endif
- extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+ extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
......@@ -64,8 +64,7 @@ void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
}
static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
- YV12_BUFFER_CONFIG *dest,
- const vp8_variance_rtcd_vtable_t *rtcd)
+ YV12_BUFFER_CONFIG *dest)
{
int i, j;
int Total = 0;
......@@ -93,7 +92,7 @@ static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source,
for (j = 0; j < source->y_width; j += 16)
{
unsigned int sse;
- Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride,
+ Total += vp8_mse16x16(src + j, source->y_stride,
dst + j, dest->y_stride,
&sse);
}
......@@ -183,8 +182,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8_yv12_copy_partial_frame_ptr(saved_frame, cm->frame_to_show);
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
- best_err = calc_partial_ssl_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ best_err = calc_partial_ssl_err(sd, cm->frame_to_show);
filt_val -= 1 + (filt_val > 10);
......@@ -196,8 +194,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
- filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
// Update the best case record or exit loop.
if (filt_err < best_err)
......@@ -228,8 +225,7 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val);
// Get the err for filtered frame
- filt_err = calc_partial_ssl_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ filt_err = calc_partial_ssl_err(sd, cm->frame_to_show);
// Update the best case record or exit loop.
if (filt_err < best_err)
......@@ -322,8 +318,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8cx_set_alt_lf_level(cpi, filt_mid);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid);
- best_err = vp8_calc_ss_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ best_err = vp8_calc_ss_err(sd, cm->frame_to_show);
ss_err[filt_mid] = best_err;
......@@ -349,8 +344,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8cx_set_alt_lf_level(cpi, filt_low);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
ss_err[filt_low] = filt_err;
}
else
......@@ -376,8 +370,7 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
vp8cx_set_alt_lf_level(cpi, filt_high);
vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high);
- filt_err = vp8_calc_ss_err(sd, cm->frame_to_show,
- IF_RTCD(&cpi->rtcd.variance));
+ filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
ss_err[filt_high] = filt_err;
}
else
......
......@@ -453,7 +453,7 @@ int vp8_mbuverror_c(MACROBLOCK *mb)
return error;
}
- int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
+ int VP8_UVSSE(MACROBLOCK *x)
{
unsigned char *uptr, *vptr;
unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
......@@ -486,17 +486,17 @@ int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
if ((mv_row | mv_col) & 7)
{
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
+ vp8_sub_pixel_variance8x8(uptr, pre_stride,
mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
+ vp8_sub_pixel_variance8x8(vptr, pre_stride,
mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
else
{
- VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
+ vp8_variance8x8(uptr, pre_stride,
upred_ptr, uv_stride, &sse2);
- VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
+ vp8_variance8x8(vptr, pre_stride,
vpred_ptr, uv_stride, &sse1);
sse2 += sse1;
}
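For reference, the branch above keys off the fractional part of the chroma motion vector: the offsets passed to the sub-pixel variance are in eighth-pel steps, so (mv_row | mv_col) & 7 is non-zero exactly when either component needs interpolation, and the whole-pel vp8_variance8x8 path suffices otherwise. A small stand-alone illustration (hypothetical needs_subpel() helper):

    #include <stdio.h>

    /* Hypothetical helper restating the test above: any eighth-pel fraction in
     * either component means the sub-pixel (interpolating) variance is required. */
    static int needs_subpel(int mv_row, int mv_col)
    {
        return ((mv_row | mv_col) & 7) != 0;
    }

    int main(void)
    {
        printf("%d %d\n", needs_subpel(16, -8),   /* whole-pel motion -> 0 */
                          needs_subpel(16, -5));  /* fractional column -> 1 */
        return 0;
    }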
......@@ -2137,7 +2137,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if(threshold < x->encode_breakout)
threshold = x->encode_breakout;
- var = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)
+ var = vp8_variance16x16
(*(b->base_src), b->src_stride,
x->e_mbd.predictor, 16, &sse);
......@@ -2150,7 +2150,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
(sse /2 > var && sse-var < 64))
{
// Check u and v to make sure skip is ok
- int sse2= VP8_UVSSE(x, IF_RTCD(&cpi->rtcd.variance));
+ int sse2= VP8_UVSSE(x);
if (sse2 * 2 < threshold)
{
x->skip = 1;
......
......@@ -94,25 +94,22 @@ static double similarity
return ssim_n * 1.0 / ssim_d;
}
- static double ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
+ static double ssim_16x16(unsigned char *s,int sp, unsigned char *r,int rp)
{
unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 256);
}
- static double ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
+ static double ssim_8x8(unsigned char *s,int sp, unsigned char *r,int rp)
{
unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
- SSIMPF_INVOKE(rtcd,8x8)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
}
// TODO: (jbb) tried to scale this function such that we may be able to use it
// for distortion metric in mode selection code ( provided we do a reconstruction)
- long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
- const vp8_variance_rtcd_vtable_t *rtcd)
+ long dssim(unsigned char *s,int sp, unsigned char *r,int rp)
{
unsigned long sum_s=0,sum_r=0,sum_sq_s=0,sum_sq_r=0,sum_sxr=0;
int64_t ssim3;
......@@ -125,7 +122,7 @@ long dssim(unsigned char *s,int sp, unsigned char *r,int rp,
c1 = cc1*16;
c2 = cc2*16;
- SSIMPF_INVOKE(rtcd,16x16)(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
+ vp8_ssim_parms_16x16(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr);
ssim_n1 = (2*sum_s*sum_r+ c1);
ssim_n2 =((int64_t) 2*256*sum_sxr-(int64_t) 2*sum_s*sum_r+c2);
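The ssim_8x8/ssim_16x16 helpers above only gather the five raw sums (sum of s, sum of r, sum of s^2, sum of r^2, sum of s*r) and hand them to similarity() together with the sample count (64 or 256). As a sketch of what that computes, here is the textbook SSIM index evaluated from those sums in floating point; the in-tree similarity() does the same thing in scaled integer arithmetic with constants derived from cc1/cc2, so this is illustrative rather than bit-exact:

    #include <stdio.h>

    /* Textbook SSIM evaluated from the five raw sums over n samples. Sketch only:
     * C1/C2 here are the usual (0.01*255)^2 and (0.03*255)^2 floating-point
     * constants, not the scaled integer cc1/cc2 used by the in-tree code. */
    static double ssim_from_sums(unsigned long sum_s, unsigned long sum_r,
                                 unsigned long sum_sq_s, unsigned long sum_sq_r,
                                 unsigned long sum_sxr, int n)
    {
        const double c1 = (0.01 * 255) * (0.01 * 255);
        const double c2 = (0.03 * 255) * (0.03 * 255);
        double mu_s  = (double)sum_s / n,  mu_r  = (double)sum_r / n;
        double var_s = (double)sum_sq_s / n - mu_s * mu_s;
        double var_r = (double)sum_sq_r / n - mu_r * mu_r;
        double cov   = (double)sum_sxr / n - mu_s * mu_r;
        return ((2 * mu_s * mu_r + c1) * (2 * cov + c2)) /
               ((mu_s * mu_s + mu_r * mu_r + c1) * (var_s + var_r + c2));
    }

    int main(void)
    {
        /* Two identical 4-sample blocks {10,20,30,40}: the index is 1.0. */
        printf("%f\n", ssim_from_sums(100, 100, 3000, 3000, 3000, 4));
        return 0;
    }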
......@@ -154,8 +151,7 @@ double vp8_ssim2
int stride_img1,
int stride_img2,
int width,
int height,