Commit 83a91e78 authored by John Koleszar

RTCD: add variance functions

This commit continues the process of converting to the new RTCD
system.

Change-Id: Ie5c1aa480637e98dc3918fb562ff45c37a66c538
parent f103dcef
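For context: RTCD (run-time CPU detection) replaces hand-maintained per-encoder function-pointer tables with a generated dispatch layer that binds each hooked function once, at initialization, according to the detected CPU capabilities. Below is a minimal sketch of that general pattern, not libvpx's generated code; the identifiers (my_sad16x16, my_rtcd_init, cpu_has_sse2) are illustrative only.

    #include <stdlib.h>

    typedef unsigned int (*sad16x16_fn)(const unsigned char *src, int src_stride,
                                        const unsigned char *ref, int ref_stride,
                                        int max_sad);

    /* Portable reference implementation: sum of absolute differences, 16x16. */
    static unsigned int my_sad16x16_c(const unsigned char *src, int src_stride,
                                      const unsigned char *ref, int ref_stride,
                                      int max_sad)
    {
        unsigned int sad = 0;
        int r, c;
        (void)max_sad; /* the reference version ignores the early-out bound */
        for (r = 0; r < 16; r++)
            for (c = 0; c < 16; c++)
                sad += abs(src[r * src_stride + c] - ref[r * ref_stride + c]);
        return sad;
    }

    /* Stand-in for an optimized variant (SIMD in practice). */
    static unsigned int my_sad16x16_sse2(const unsigned char *src, int src_stride,
                                         const unsigned char *ref, int ref_stride,
                                         int max_sad)
    {
        return my_sad16x16_c(src, src_stride, ref, ref_stride, max_sad);
    }

    /* Call sites use the plain name; the pointer is bound once at startup. */
    static sad16x16_fn my_sad16x16 = my_sad16x16_c;

    static void my_rtcd_init(int cpu_has_sse2)
    {
        if (cpu_has_sse2)
            my_sad16x16 = my_sad16x16_sse2;
    }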
@@ -728,18 +728,18 @@ static void multiframe_quality_enhance_block
     unsigned int act, sse, sad, thr;
     if (blksize == 16)
     {
-        act = (vp8_variance_var16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
-        sad = (vp8_variance_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
+        act = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+        sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
     }
     else if (blksize == 8)
     {
-        act = (vp8_variance_var8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
-        sad = (vp8_variance_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
+        act = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+        sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
     }
     else
     {
-        act = (vp8_variance_var4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
-        sad = (vp8_variance_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
+        act = (vp8_variance4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
+        sad = (vp8_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
     }
     /* thr = qdiff/8 + log2(act) + log4(qprev) */
     thr = (qdiff>>3);
......
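The arithmetic in the hunk above normalizes the raw variance and SAD sums to per-pixel averages with rounding: an NxN block holds N*N pixels, so 16x16 divides by 256 via (v + 128) >> 8, 8x8 by 64 via (v + 32) >> 6, and 4x4 by 16 via (v + 8) >> 4, each adding half the divisor before shifting. A restatement of that arithmetic; the helper name is mine, not libvpx's:

    static unsigned int avg_per_pixel(unsigned int sum, int log2_pixels)
    {
        /* Rounded division by 2^log2_pixels: add half the divisor, then shift. */
        return (sum + (1u << (log2_pixels - 1))) >> log2_pixels;
    }

    /* avg_per_pixel(v, 8) == (v + 128) >> 8   -- 16x16, 256 pixels
       avg_per_pixel(v, 6) == (v + 32) >> 6    -- 8x8,    64 pixels
       avg_per_pixel(v, 4) == (v + 8) >> 4     -- 4x4,    16 pixels */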
@@ -195,3 +195,202 @@ vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6
 prototype void vp8_bilinear_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
 specialize vp8_bilinear_predict4x4 mmx media neon
 vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6
+
+#
+# Encoder functions below this point.
+#
+if [ "$CONFIG_VP8_ENCODER" = "yes" ]; then
+
+#
+# Whole-pixel Variance
+#
+prototype unsigned int vp8_variance4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance4x4 mmx sse2
+vp8_variance4x4_sse2=vp8_variance4x4_wmt
+
+prototype unsigned int vp8_variance8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance8x8 mmx sse2 media neon
+vp8_variance8x8_sse2=vp8_variance8x8_wmt
+vp8_variance8x8_media=vp8_variance8x8_armv6
+
+prototype unsigned int vp8_variance8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance8x16 mmx sse2 neon
+vp8_variance8x16_sse2=vp8_variance8x16_wmt
+
+prototype unsigned int vp8_variance16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance16x8 mmx sse2 neon
+vp8_variance16x8_sse2=vp8_variance16x8_wmt
+
+prototype unsigned int vp8_variance16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance16x16 mmx sse2 media neon
+vp8_variance16x16_sse2=vp8_variance16x16_wmt
+vp8_variance16x16_media=vp8_variance16x16_armv6
+
+#
+# Sub-pixel Variance
+#
+prototype unsigned int vp8_sub_pixel_variance4x4 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance4x4 mmx sse2
+vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt
+
+prototype unsigned int vp8_sub_pixel_variance8x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance8x8 mmx sse2 media neon
+vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt
+vp8_sub_pixel_variance8x8_media=vp8_sub_pixel_variance8x8_armv6
+
+prototype unsigned int vp8_sub_pixel_variance8x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance8x16 mmx sse2
+vp8_sub_pixel_variance8x16_sse2=vp8_sub_pixel_variance8x16_wmt
+
+prototype unsigned int vp8_sub_pixel_variance16x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance16x8 mmx sse2 ssse3
+vp8_sub_pixel_variance16x8_sse2=vp8_sub_pixel_variance16x8_wmt
+
+prototype unsigned int vp8_sub_pixel_variance16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_variance16x16 mmx sse2 ssse3 media neon
+vp8_sub_pixel_variance16x16_sse2=vp8_sub_pixel_variance16x16_wmt
+vp8_sub_pixel_variance16x16_media=vp8_sub_pixel_variance16x16_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_h mmx sse2 media neon
+vp8_variance_halfpixvar16x16_h_sse2=vp8_variance_halfpixvar16x16_h_wmt
+vp8_variance_halfpixvar16x16_h_media=vp8_variance_halfpixvar16x16_h_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_v mmx sse2 media neon
+vp8_variance_halfpixvar16x16_v_sse2=vp8_variance_halfpixvar16x16_v_wmt
+vp8_variance_halfpixvar16x16_v_media=vp8_variance_halfpixvar16x16_v_armv6
+
+prototype unsigned int vp8_variance_halfpixvar16x16_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_variance_halfpixvar16x16_hv mmx sse2 media neon
+vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt
+vp8_variance_halfpixvar16x16_hv_media=vp8_variance_halfpixvar16x16_hv_armv6
+
+#
+# Sum of squares (vector)
+#
+prototype unsigned int vp8_get_mb_ss "const short *"
+specialize vp8_get_mb_ss mmx sse2
+
+#
+# SSE (Sum Squared Error)
+#
+prototype unsigned int vp8_sub_pixel_mse16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
+specialize vp8_sub_pixel_mse16x16 mmx sse2
+vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt
+
+prototype unsigned int vp8_mse16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
+specialize vp8_mse16x16 mmx sse2 media neon
+vp8_mse16x16_sse2=vp8_mse16x16_wmt
+vp8_mse16x16_media=vp8_mse16x16_armv6
+
+prototype unsigned int vp8_get4x4sse_cs "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride"
+specialize vp8_get4x4sse_cs mmx neon
+
+#
+# Single block SAD
+#
+prototype unsigned int vp8_sad4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad4x4 mmx sse2 neon
+vp8_sad4x4_sse2=vp8_sad4x4_wmt
+
+prototype unsigned int vp8_sad8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad8x8 mmx sse2 neon
+vp8_sad8x8_sse2=vp8_sad8x8_wmt
+
+prototype unsigned int vp8_sad8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad8x16 mmx sse2 neon
+vp8_sad8x16_sse2=vp8_sad8x16_wmt
+
+prototype unsigned int vp8_sad16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad16x8 mmx sse2 neon
+vp8_sad16x8_sse2=vp8_sad16x8_wmt
+
+prototype unsigned int vp8_sad16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int max_sad"
+specialize vp8_sad16x16 mmx sse2 sse3 media neon
+vp8_sad16x16_sse2=vp8_sad16x16_wmt
+vp8_sad16x16_media=vp8_sad16x16_armv6
+
+#
+# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
+#
+prototype void vp8_sad4x4x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp8_sad4x4x3 sse3
+
+prototype void vp8_sad8x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x8x3 sse3
+
+prototype void vp8_sad8x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x16x3 sse3
+
+prototype void vp8_sad16x8x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x8x3 sse3 ssse3
+
+prototype void vp8_sad16x16x3 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x16x3 sse3 ssse3
+
+# Note the only difference in the following prototypes is that they return into
+# an array of short
+prototype void vp8_sad4x4x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp8_sad4x4x8 sse4_1
+vp8_sad4x4x8_sse4_1=vp8_sad4x4x8_sse4
+
+prototype void vp8_sad8x8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp8_sad8x8x8 sse4_1
+vp8_sad8x8x8_sse4_1=vp8_sad8x8x8_sse4
+
+prototype void vp8_sad8x16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp8_sad8x16x8 sse4_1
+vp8_sad8x16x8_sse4_1=vp8_sad8x16x8_sse4
+
+prototype void vp8_sad16x8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp8_sad16x8x8 sse4_1
+vp8_sad16x8x8_sse4_1=vp8_sad16x8x8_sse4
+
+prototype void vp8_sad16x16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
+specialize vp8_sad16x16x8 sse4_1
+vp8_sad16x16x8_sse4_1=vp8_sad16x16x8_sse4
+
+#
+# Multi-block SAD, comparing a reference to N independent blocks
+#
+prototype void vp8_sad4x4x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int ref_stride, unsigned int *sad_array"
+specialize vp8_sad4x4x4d sse3
+
+prototype void vp8_sad8x8x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x8x4d sse3
+
+prototype void vp8_sad8x16x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int ref_stride, unsigned int *sad_array"
+specialize vp8_sad8x16x4d sse3
+
+prototype void vp8_sad16x8x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x8x4d sse3
+
+prototype void vp8_sad16x16x4d "const unsigned char *src_ptr, int source_stride, unsigned char *ref_ptr[4], int ref_stride, unsigned int *sad_array"
+specialize vp8_sad16x16x4d sse3
+
+#
+# Block copy
+#
+case $arch in
+x86*)
+prototype void vp8_copy32xn "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"
+specialize vp8_copy32xn sse2 sse3
+;;
+esac
+
+#
+# Structured Similarity (SSIM)
+#
+if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
+[ $arch = "x86_64" ] && sse2_on_x86_64=sse2
+
+prototype void vp8_ssim_parms_8x8 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+specialize vp8_ssim_parms_8x8 $sse2_on_x86_64
+
+prototype void vp8_ssim_parms_16x16 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
+specialize vp8_ssim_parms_16x16 $sse2_on_x86_64
+fi
+
+# End of encoder only functions
+fi
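A reading of the hunk above: each `prototype` line declares a hooked function's return type, name, and argument list; `specialize` names the instruction-set extensions that have optimized implementations; and an assignment such as `vp8_variance4x4_sse2=vp8_variance4x4_wmt` tells the generator that the sse2-specialized symbol is actually named `vp8_variance4x4_wmt` (libvpx's historical "Willamette" suffix for SSE2 code). From these lines a build step generates per-ISA declarations and a dispatch hook roughly like the sketch below; this is a simplified illustration, not the literal generated header.

    /* Simplified illustration of what one prototype/specialize group may
     * expand to (assumption: the real generated rtcd header differs in detail). */
    unsigned int vp8_variance4x4_c(const unsigned char *src_ptr, int source_stride,
                                   const unsigned char *ref_ptr, int ref_stride,
                                   unsigned int *sse);
    unsigned int vp8_variance4x4_mmx(const unsigned char *src_ptr, int source_stride,
                                     const unsigned char *ref_ptr, int ref_stride,
                                     unsigned int *sse);
    /* "vp8_variance4x4_sse2=vp8_variance4x4_wmt": the sse2 variant's real symbol */
    unsigned int vp8_variance4x4_wmt(const unsigned char *src_ptr, int source_stride,
                                     const unsigned char *ref_ptr, int ref_stride,
                                     unsigned int *sse);

    #if CONFIG_RUNTIME_CPU_DETECT
    /* vp8_variance4x4 becomes a pointer filled in by generated init code. */
    extern unsigned int (*vp8_variance4x4)(const unsigned char *src_ptr,
                                           int source_stride,
                                           const unsigned char *ref_ptr,
                                           int ref_stride, unsigned int *sse);
    #else
    /* Without runtime detection, the best compile-time variant is bound directly. */
    #define vp8_variance4x4 vp8_variance4x4_c
    #endif

The `sadNxNx3`, `sadNxNx8`, and `sadNxNx4d` groups follow the same scheme; as the comments in the hunk say, they differ only in computing several SADs per call: three references one pixel apart, eight offsets returning into an array of unsigned short, or four independent reference blocks.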
@@ -32,32 +32,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
 #if HAVE_MEDIA
     if (flags & HAS_MEDIA)
     {
-        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_armv6;
-        /*cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
-        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
-        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
-        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;*/
-        /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
-        cpi->rtcd.variance.var8x8 = vp8_variance8x8_armv6;
-        /*cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
-        cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;*/
-        cpi->rtcd.variance.var16x16 = vp8_variance16x16_armv6;
-        /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
-        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_armv6;
-        /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
-        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
-        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_armv6;
-        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_armv6;
-        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_armv6;
-        /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-        /*cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;*/
         cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_armv6;
         cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_armv6;
         cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_armv6;
@@ -79,32 +53,6 @@ void vp8_arch_arm_encoder_init(VP8_COMP *cpi)
 #if HAVE_NEON
     if (flags & HAS_NEON)
     {
-        cpi->rtcd.variance.sad16x16 = vp8_sad16x16_neon;
-        cpi->rtcd.variance.sad16x8 = vp8_sad16x8_neon;
-        cpi->rtcd.variance.sad8x16 = vp8_sad8x16_neon;
-        cpi->rtcd.variance.sad8x8 = vp8_sad8x8_neon;
-        cpi->rtcd.variance.sad4x4 = vp8_sad4x4_neon;
-        /*cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;*/
-        cpi->rtcd.variance.var8x8 = vp8_variance8x8_neon;
-        cpi->rtcd.variance.var8x16 = vp8_variance8x16_neon;
-        cpi->rtcd.variance.var16x8 = vp8_variance16x8_neon;
-        cpi->rtcd.variance.var16x16 = vp8_variance16x16_neon;
-        /*cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;*/
-        cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_neon;
-        /*cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
-        cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;*/
-        cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_neon;
-        cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_neon;
-        cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_neon;
-        cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_neon;
-        cpi->rtcd.variance.mse16x16 = vp8_mse16x16_neon;
-        /*cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;*/
-        cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_neon;
         cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_neon;
         cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_neon;
         cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_neon;
......
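The two deleted blocks above are the hand-maintained ARM entries of the encoder's variance dispatch table. Before this change, `cpi->rtcd.variance` was a struct of function pointers (the `vp8_variance_rtcd_vtable_t` still visible in the header hunks near the end of this diff), filled per architecture at init; a heavily abridged sketch of that pattern, with the member list shortened by me:

    /* Abridged sketch of the hand-written dispatch being retired; the real
     * struct lived in vp8/encoder/variance.h and had one member per hook. */
    typedef unsigned int (*vp8_sad_fn_t)(const unsigned char *src_ptr,
                                         int source_stride,
                                         const unsigned char *ref_ptr,
                                         int ref_stride, int max_sad);
    typedef unsigned int (*vp8_variance_fn_t)(const unsigned char *src_ptr,
                                              int source_stride,
                                              const unsigned char *ref_ptr,
                                              int ref_stride, unsigned int *sse);
    typedef struct {
        vp8_sad_fn_t      sad16x16;
        vp8_variance_fn_t var16x16;
        vp8_variance_fn_t mse16x16;
        /* ... more members elided ... */
    } vp8_variance_rtcd_vtable_t;

Once a function is converted, its assignments disappear from these tables and the generated RTCD layer takes over the binding.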
@@ -9,6 +9,7 @@
*/
#include "vpx_config.h"
#include "vpx_rtcd.h"
#include "vp8/encoder/variance.h"
#include "vp8/common/filter.h"
......
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VARIANCE_ARM_H
#define VARIANCE_ARM_H
#if HAVE_MEDIA
extern prototype_sad(vp8_sad16x16_armv6);
extern prototype_variance(vp8_variance16x16_armv6);
extern prototype_variance(vp8_variance8x8_armv6);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_armv6);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_armv6);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_armv6);
extern prototype_variance(vp8_mse16x16_armv6);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_armv6
#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_armv6
#undef vp8_variance_subpixvar8x8
#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_armv6
#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_armv6
#undef vp8_variance_mse16x16
#define vp8_variance_mse16x16 vp8_mse16x16_armv6
#undef vp8_variance_var8x8
#define vp8_variance_var8x8 vp8_variance8x8_armv6
#undef vp8_variance_halfpixvar16x16_h
#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_armv6
#undef vp8_variance_halfpixvar16x16_v
#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_armv6
#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_armv6
#endif /* !CONFIG_RUNTIME_CPU_DETECT */
#endif /* HAVE_MEDIA */
#if HAVE_NEON
extern prototype_sad(vp8_sad4x4_neon);
extern prototype_sad(vp8_sad8x8_neon);
extern prototype_sad(vp8_sad8x16_neon);
extern prototype_sad(vp8_sad16x8_neon);
extern prototype_sad(vp8_sad16x16_neon);
//extern prototype_variance(vp8_variance4x4_c);
extern prototype_variance(vp8_variance8x8_neon);
extern prototype_variance(vp8_variance8x16_neon);
extern prototype_variance(vp8_variance16x8_neon);
extern prototype_variance(vp8_variance16x16_neon);
//extern prototype_subpixvariance(vp8_sub_pixel_variance4x4_c);
extern prototype_subpixvariance(vp8_sub_pixel_variance8x8_neon);
//extern prototype_subpixvariance(vp8_sub_pixel_variance8x16_c);
//extern prototype_subpixvariance(vp8_sub_pixel_variance16x8_c);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon);
extern prototype_subpixvariance(vp8_sub_pixel_variance16x16_neon_func);
extern prototype_variance(vp8_variance_halfpixvar16x16_h_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_v_neon);
extern prototype_variance(vp8_variance_halfpixvar16x16_hv_neon);
//extern prototype_getmbss(vp8_get_mb_ss_c);
extern prototype_variance(vp8_mse16x16_neon);
extern prototype_get16x16prederror(vp8_get4x4sse_cs_neon);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_variance_sad4x4
#define vp8_variance_sad4x4 vp8_sad4x4_neon
#undef vp8_variance_sad8x8
#define vp8_variance_sad8x8 vp8_sad8x8_neon
#undef vp8_variance_sad8x16
#define vp8_variance_sad8x16 vp8_sad8x16_neon
#undef vp8_variance_sad16x8
#define vp8_variance_sad16x8 vp8_sad16x8_neon
#undef vp8_variance_sad16x16
#define vp8_variance_sad16x16 vp8_sad16x16_neon
//#undef vp8_variance_var4x4
//#define vp8_variance_var4x4 vp8_variance4x4_c
#undef vp8_variance_var8x8
#define vp8_variance_var8x8 vp8_variance8x8_neon
#undef vp8_variance_var8x16
#define vp8_variance_var8x16 vp8_variance8x16_neon
#undef vp8_variance_var16x8
#define vp8_variance_var16x8 vp8_variance16x8_neon
#undef vp8_variance_var16x16
#define vp8_variance_var16x16 vp8_variance16x16_neon
//#undef vp8_variance_subpixvar4x4
//#define vp8_variance_subpixvar4x4 vp8_sub_pixel_variance4x4_c
#undef vp8_variance_subpixvar8x8
#define vp8_variance_subpixvar8x8 vp8_sub_pixel_variance8x8_neon
//#undef vp8_variance_subpixvar8x16
//#define vp8_variance_subpixvar8x16 vp8_sub_pixel_variance8x16_c
//#undef vp8_variance_subpixvar16x8
//#define vp8_variance_subpixvar16x8 vp8_sub_pixel_variance16x8_c
#undef vp8_variance_subpixvar16x16
#define vp8_variance_subpixvar16x16 vp8_sub_pixel_variance16x16_neon
#undef vp8_variance_halfpixvar16x16_h
#define vp8_variance_halfpixvar16x16_h vp8_variance_halfpixvar16x16_h_neon
#undef vp8_variance_halfpixvar16x16_v
#define vp8_variance_halfpixvar16x16_v vp8_variance_halfpixvar16x16_v_neon
#undef vp8_variance_halfpixvar16x16_hv
#define vp8_variance_halfpixvar16x16_hv vp8_variance_halfpixvar16x16_hv_neon
//#undef vp8_variance_getmbss
//#define vp8_variance_getmbss vp8_get_mb_ss_c
#undef vp8_variance_mse16x16
#define vp8_variance_mse16x16 vp8_mse16x16_neon
#undef vp8_variance_get4x4sse_cs
#define vp8_variance_get4x4sse_cs vp8_get4x4sse_cs_neon
#endif /* !CONFIG_RUNTIME_CPU_DETECT */
#endif /* HAVE_NEON */
#endif
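The `#if !CONFIG_RUNTIME_CPU_DETECT` blocks above are the old scheme's compile-time binding: call sites use the generic macro names (`vp8_variance_var16x16`, `vp8_variance_sad16x16`, and so on), and each platform header re-points those macros at its best implementation, so an ARMv6 build without runtime detection resolves `vp8_variance_var16x16` straight to `vp8_variance16x16_armv6`. The first hunk of this commit is exactly the removal of such call sites in favor of the RTCD-generated names, which make the same compile-time choice automatically.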
@@ -96,7 +96,7 @@ static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
      * lambda using a non-linear combination (e.g., the smallest, or second
      * smallest, etc.).
      */
-    act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
+    act = vp8_variance16x16(x->src.y_buffer,
                     x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
     act = act<<4;
......
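This and the following hunks are the mechanical half of the conversion: every `VARIANCE_INVOKE(&cpi->rtcd.variance, name)(args)` dispatch, and its `IF_RTCD` variant, collapses to a direct call of the RTCD name. The macro existed only to choose between a vtable lookup and a direct call depending on CONFIG_RUNTIME_CPU_DETECT; the generated header now makes that choice, so call sites simplify as in the hunk above:

    /* before */
    act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
                    x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
    /* after */
    act = vp8_variance16x16(x->src.y_buffer,
                    x->src.y_stride, VP8_VAR_OFFS, 0, &sse);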
@@ -53,7 +53,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
         }
     }
-    intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
+    intra_pred_var = vp8_get_mb_ss(x->src_diff);
     return intra_pred_var;
 }
......
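`vp8_get_mb_ss` is the "Sum of squares (vector)" hook from the rtcd list above: it returns the sum of squares of the macroblock's 256 residual samples in `src_diff`, which `vp8_encode_intra` uses as its intra prediction error measure. A sketch consistent with the C reference behavior; the helper name is mine:

    #define MB_SAMPLES 256  /* 16x16 luma residual values */

    static unsigned int mb_sum_squares(const short *src_diff)
    {
        unsigned int i, ss = 0;
        for (i = 0; i < MB_SAMPLES; i++)
            ss += (unsigned int)(src_diff[i] * src_diff[i]);
        return ss;
    }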
@@ -409,7 +409,7 @@ static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, YV12_BUFFER_CONFIG
     ref_ptr = (unsigned char *)(*(d->base_pre) + d->pre );
-    VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16) ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
+    vp8_mse16x16 ( src_ptr, src_stride, ref_ptr, ref_stride, (unsigned int *)(best_motion_err));
 }
 static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
@@ -433,7 +433,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
     int new_mv_mode_penalty = 256;
     // override the default variance function to use MSE
-    v_fn_ptr.vf = VARIANCE_INVOKE(IF_RTCD(&cpi->rtcd.variance), mse16x16);
+    v_fn_ptr.vf = vp8_mse16x16;
     // Set up pointers for this macro block recon buffer
     xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
......
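The override called out in the comment above matters because the two hooks share a signature but not a meaning: `vp8_variance16x16` returns the mean-removed sum of squared differences, while `vp8_mse16x16` returns the raw sum; both also store the raw SSE through their final pointer argument. In the notation of the C reference implementations:

    /* For a 16x16 block (N = 256 samples):
     *   variance16x16 returns  SSE - (sum * sum) / N
     *   mse16x16      returns  SSE
     * First-pass motion search ranks candidates by raw error, hence the
     * override of v_fn_ptr.vf. */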
@@ -25,53 +25,6 @@ extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
 void vp8_cmachine_specific_config(VP8_COMP *cpi)
 {
 #if CONFIG_RUNTIME_CPU_DETECT
-    cpi->rtcd.variance.sad16x16 = vp8_sad16x16_c;
-    cpi->rtcd.variance.sad16x8 = vp8_sad16x8_c;
-    cpi->rtcd.variance.sad8x16 = vp8_sad8x16_c;
-    cpi->rtcd.variance.sad8x8 = vp8_sad8x8_c;
-    cpi->rtcd.variance.sad4x4 = vp8_sad4x4_c;
-    cpi->rtcd.variance.sad16x16x3 = vp8_sad16x16x3_c;
-    cpi->rtcd.variance.sad16x8x3 = vp8_sad16x8x3_c;
-    cpi->rtcd.variance.sad8x16x3 = vp8_sad8x16x3_c;
-    cpi->rtcd.variance.sad8x8x3 = vp8_sad8x8x3_c;
-    cpi->rtcd.variance.sad4x4x3 = vp8_sad4x4x3_c;
-    cpi->rtcd.variance.sad16x16x8 = vp8_sad16x16x8_c;
-    cpi->rtcd.variance.sad16x8x8 = vp8_sad16x8x8_c;
-    cpi->rtcd.variance.sad8x16x8 = vp8_sad8x16x8_c;
-    cpi->rtcd.variance.sad8x8x8 = vp8_sad8x8x8_c;
-    cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_c;
-    cpi->rtcd.variance.sad16x16x4d = vp8_sad16x16x4d_c;
-    cpi->rtcd.variance.sad16x8x4d = vp8_sad16x8x4d_c;
-    cpi->rtcd.variance.sad8x16x4d = vp8_sad8x16x4d_c;
-    cpi->rtcd.variance.sad8x8x4d = vp8_sad8x8x4d_c;
-    cpi->rtcd.variance.sad4x4x4d = vp8_sad4x4x4d_c;
-#if ARCH_X86 || ARCH_X86_64
-    cpi->rtcd.variance.copy32xn = vp8_copy32xn_c;
-#endif
-    cpi->rtcd.variance.var4x4 = vp8_variance4x4_c;
-    cpi->rtcd.variance.var8x8 = vp8_variance8x8_c;
-    cpi->rtcd.variance.var8x16 = vp8_variance8x16_c;
-    cpi->rtcd.variance.var16x8 = vp8_variance16x8_c;
-    cpi->rtcd.variance.var16x16 = vp8_variance16x16_c;
-    cpi->rtcd.variance.subpixvar4x4 = vp8_sub_pixel_variance4x4_c;
-    cpi->rtcd.variance.subpixvar8x8 = vp8_sub_pixel_variance8x8_c;
-    cpi->rtcd.variance.subpixvar8x16 = vp8_sub_pixel_variance8x16_c;
-    cpi->rtcd.variance.subpixvar16x8 = vp8_sub_pixel_variance16x8_c;
-    cpi->rtcd.variance.subpixvar16x16 = vp8_sub_pixel_variance16x16_c;
-    cpi->rtcd.variance.halfpixvar16x16_h = vp8_variance_halfpixvar16x16_h_c;
-    cpi->rtcd.variance.halfpixvar16x16_v = vp8_variance_halfpixvar16x16_v_c;
-    cpi->rtcd.variance.halfpixvar16x16_hv = vp8_variance_halfpixvar16x16_hv_c;
-    cpi->rtcd.variance.subpixmse16x16 = vp8_sub_pixel_mse16x16_c;
-    cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
-    cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
-    cpi->rtcd.variance.get4x4sse_cs = vp8_get4x4sse_cs_c;
     cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
     cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
     cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
@@ -95,10 +48,6 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
 #if !(CONFIG_REALTIME_ONLY)
     cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
 #endif
-#if CONFIG_INTERNAL_STATS
-    cpi->rtcd.variance.ssimpf_8x8 = vp8_ssim_parms_8x8_c;
-    cpi->rtcd.variance.ssimpf_16x16 = vp8_ssim_parms_16x16_c;
-#endif
 #endif
     // Pure C:
......
@@ -69,7 +69,7 @@ extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_
 int vp8_estimate_entropy_savings(VP8_COMP *cpi);
-int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd);
+int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest);
 extern void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance);
@@ -85,8 +85,7 @@ extern double vp8_calc_ssim
     YV12_BUFFER_CONFIG *source,
     YV12_BUFFER_CONFIG *dest,
     int lumamask,
-    double *weight,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *weight
 );
@@ -96,8 +95,7 @@ extern double vp8_calc_ssimg
     YV12_BUFFER_CONFIG *dest,
     double *ssim_y,
     double *ssim_u,
-    double *ssim_v,
-    const vp8_variance_rtcd_vtable_t *rtcd
+    double *ssim_v
 );
@@ -1947,62 +1945,62 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf)
     vp8cx_create_encoder_threads(cpi);
 #endif
-    cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16);
-    cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16);
-    cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v);
-    cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv);
-    cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3);
-    cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8);
-    cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d);
-    cpi->fn_ptr[BLOCK_16X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8);
-    cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8);
-    cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8);