Commit e34e6840 authored by James Zern's avatar James Zern Committed by Gerrit Code Review

Merge changes If31d36c8,I10b947e7

* changes:
  vpx_dsp,add_noise: remove mmx implementation
  vpx_dsp: remove mmx variance implementations
parents 45392679 462e0ff8
......@@ -185,11 +185,6 @@ TEST_P(AddNoiseTest, CheckCvsAssembly) {
INSTANTIATE_TEST_CASE_P(C, AddNoiseTest,
::testing::Values(vpx_plane_add_noise_c));
#if HAVE_MMX
INSTANTIATE_TEST_CASE_P(MMX, AddNoiseTest,
::testing::Values(vpx_plane_add_noise_mmx));
#endif
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(SSE2, AddNoiseTest,
::testing::Values(vpx_plane_add_noise_sse2));
......
......@@ -976,16 +976,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_MMX
INSTANTIATE_TEST_CASE_P(
MMX, VpxSubpelVarianceTest,
::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_mmx, 0),
make_tuple(4, 3, &vpx_sub_pixel_variance16x8_mmx, 0),
make_tuple(3, 4, &vpx_sub_pixel_variance8x16_mmx, 0),
make_tuple(3, 3, &vpx_sub_pixel_variance8x8_mmx, 0),
make_tuple(2, 2, &vpx_sub_pixel_variance4x4_mmx, 0)));
#endif // HAVE_MMX
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
::testing::Values(vpx_get_mb_ss_sse2));
......
......@@ -55,7 +55,6 @@ endif # CONFIG_VP9_HIGHBITDEPTH
ifneq ($(filter yes,$(CONFIG_POSTPROC) $(CONFIG_VP9_POSTPROC)),)
DSP_SRCS-yes += add_noise.c
DSP_SRCS-$(HAVE_MSA) += mips/add_noise_msa.c
DSP_SRCS-$(HAVE_MMX) += x86/add_noise_mmx.asm
DSP_SRCS-$(HAVE_SSE2) += x86/add_noise_sse2.asm
endif # CONFIG_POSTPROC
......@@ -322,8 +321,6 @@ DSP_SRCS-$(HAVE_NEON) += arm/variance_neon.c
DSP_SRCS-$(HAVE_MSA) += mips/variance_msa.c
DSP_SRCS-$(HAVE_MSA) += mips/sub_pixel_variance_msa.c
DSP_SRCS-$(HAVE_MMX) += x86/variance_mmx.c
DSP_SRCS-$(HAVE_MMX) += x86/variance_impl_mmx.asm
DSP_SRCS-$(HAVE_SSE) += x86/variance_sse2.c
DSP_SRCS-$(HAVE_SSE2) += x86/variance_sse2.c # Contains SSE2 and SSSE3
DSP_SRCS-$(HAVE_SSE2) += x86/halfpix_variance_sse2.c
......
......@@ -1478,16 +1478,16 @@ add_proto qw/uint32_t vpx_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int
specialize qw/vpx_sub_pixel_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance16x16 mmx media neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vpx_sub_pixel_variance16x16 media neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance16x8 mmx msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vpx_sub_pixel_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x16 mmx msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vpx_sub_pixel_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x8 mmx media neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vpx_sub_pixel_variance8x8 media neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
......@@ -1496,7 +1496,7 @@ add_proto qw/uint32_t vpx_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int s
specialize qw/vpx_sub_pixel_variance4x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_sub_pixel_variance4x4 mmx msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vpx_sub_pixel_variance4x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc";
......@@ -1541,13 +1541,13 @@ add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, i
# Specialty Subpixel
#
add_proto qw/uint32_t vpx_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_variance_halfpixvar16x16_h mmx sse2 media/;
specialize qw/vpx_variance_halfpixvar16x16_h sse2 media/;
add_proto qw/uint32_t vpx_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_variance_halfpixvar16x16_v mmx sse2 media/;
specialize qw/vpx_variance_halfpixvar16x16_v sse2 media/;
add_proto qw/uint32_t vpx_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse";
specialize qw/vpx_variance_halfpixvar16x16_hv mmx sse2 media/;
specialize qw/vpx_variance_halfpixvar16x16_hv sse2 media/;
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
......@@ -1913,7 +1913,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
if (vpx_config("CONFIG_POSTPROC") eq "yes" || vpx_config("CONFIG_VP9_POSTPROC") eq "yes") {
add_proto qw/void vpx_plane_add_noise/, "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch";
specialize qw/vpx_plane_add_noise mmx sse2 msa/;
specialize qw/vpx_plane_add_noise sse2 msa/;
}
} # CONFIG_ENCODERS || CONFIG_POSTPROC || CONFIG_VP9_POSTPROC
......
;
; Copyright (c) 2015 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
%include "vpx_ports/x86_abi_support.asm"
;void vpx_plane_add_noise_mmx (unsigned char *Start, unsigned char *noise,
; unsigned char blackclamp[16],
; unsigned char whiteclamp[16],
; unsigned char bothclamp[16],
; unsigned int Width, unsigned int Height, int Pitch)
;
; Adds film-grain noise to a plane in place.  Each source row is first
; clamped with saturating byte arithmetic (so adding the signed noise
; bytes cannot wrap past 0/255), then a run of bytes taken from a random
; offset inside the caller-supplied noise buffer is added row by row.
global sym(vpx_plane_add_noise_mmx) PRIVATE
sym(vpx_plane_add_noise_mmx):
push rbp
mov rbp, rsp
SHADOW_ARGS_TO_STACK 8 ; 8 args, matching the prototype above
GET_GOT rbx
push rsi
push rdi
; end prolog
; get the clamps in registers (loop-invariant: loaded once, reused per row)
mov rdx, arg(2) ; blackclamp
movq mm3, [rdx]
mov rdx, arg(3) ; whiteclamp
movq mm4, [rdx]
mov rdx, arg(4) ; bothclamp
movq mm5, [rdx]
.addnoise_loop:
; pick a fresh random offset into the noise buffer for every row so
; consecutive rows do not reuse the same noise bytes
call sym(LIBVPX_RAND) WRT_PLT ; rand() via PLT; result in rax
mov rcx, arg(1) ;noise
and rax, 0xff ; confine the offset to [0, 255]
add rcx, rax ; rdi below = noise + random offset
mov rdi, rcx
movsxd rcx, dword arg(5) ;[Width]
mov rsi, arg(0) ;Pos
xor rax,rax ; rax = byte index within the row
.addnoise_nextset:
; process 8 pixels per iteration
movq mm1,[rsi+rax] ; get the source
psubusb mm1, mm3 ; subtract black clamp (saturating: stops at 0)
paddusb mm1, mm5 ; add both clamp (saturating: stops at 255)
psubusb mm1, mm4 ; subtract whiteclamp
movq mm2,[rdi+rax] ; get the noise for this line
paddb mm1,mm2 ; add it in (wrapping add; clamp above made this safe)
movq [rsi+rax],mm1 ; store the result
add rax,8 ; move to the next line
cmp rax, rcx
jl .addnoise_nextset ; NOTE(review): loops while rax < Width in 8-byte
; steps, so a non-multiple-of-8 Width writes up
; to 7 bytes past Width (within Pitch) — confirm
movsxd rax, dword arg(7) ; Pitch
add arg(0), rax ; Start += Pitch
sub dword arg(6), 1 ; Height -= 1
jg .addnoise_loop
; begin epilog
pop rdi
pop rsi
RESTORE_GOT
UNSHADOW_ARGS
pop rbp
ret
SECTION_RODATA
align 16
; NOTE(review): neither `Blur` nor `rd` is referenced by
; vpx_plane_add_noise_mmx above — presumably shared with (or left over
; from) other postproc MMX code; confirm before removing.
; Blur: 16/64/16 filter weights plus zero padding, as 16-bit words.
Blur:
times 16 dw 16
times 8 dw 64
times 16 dw 16
times 8 dw 0
; rd: rounding constant 0x40 (64) replicated four times.
rd:
times 4 dw 0x40
This diff is collapsed.
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
/* Per-phase bilinear filter taps for eighth-pel interpolation.
 * Row i holds the tap pair (128 - 16*i, 16*i), each value broadcast four
 * times so a whole row can be loaded directly into an MMX register.
 * The two taps always sum to 128 (1.0 in Q7), so filtering preserves the
 * DC level.  Indexed by xoffset/yoffset in [0, 7]. */
DECLARE_ALIGNED(16, static const int16_t, bilinear_filters_mmx[8][8]) = {
{ 128, 128, 128, 128, 0, 0, 0, 0 },
{ 112, 112, 112, 112, 16, 16, 16, 16 },
{ 96, 96, 96, 96, 32, 32, 32, 32 },
{ 80, 80, 80, 80, 48, 48, 48, 48 },
{ 64, 64, 64, 64, 64, 64, 64, 64 },
{ 48, 48, 48, 48, 80, 80, 80, 80 },
{ 32, 32, 32, 32, 96, 96, 96, 96 },
{ 16, 16, 16, 16, 112, 112, 112, 112 }
};
/* Forward declarations of the MMX assembly helpers this file wraps.
 * Judging by the call sites below, each bilinearly filters a block with
 * the given horizontal/vertical tap rows and accumulates the difference
 * sum into *sum and the sum of squared differences into *sumsquared —
 * the authoritative semantics live in the asm implementation; confirm
 * there.  The 4x4 variant has the block size baked in; the general
 * variant is 8 pixels wide and takes an explicit Height. */
extern void vpx_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr,
int ref_pixels_per_line,
const unsigned char *src_ptr,
int src_pixels_per_line,
const int16_t *HFilter,
const int16_t *VFilter,
int *sum,
unsigned int *sumsquared);
extern void vpx_filter_block2d_bil_var_mmx(const unsigned char *ref_ptr,
int ref_pixels_per_line,
const unsigned char *src_ptr,
int src_pixels_per_line,
unsigned int Height,
const int16_t *HFilter,
const int16_t *VFilter,
int *sum,
unsigned int *sumsquared);
/*
 * 4x4 sub-pixel variance, MMX.
 *
 * Bilinearly filters the 4x4 block at eighth-pel phase (xoffset, yoffset)
 * and returns the variance of the filtered block against the second
 * block: sse - sum^2 / 16.  *sse receives the raw sum of squared
 * differences.
 */
uint32_t vpx_sub_pixel_variance4x4_mmx(const uint8_t *a, int a_stride,
                                       int xoffset, int yoffset,
                                       const uint8_t *b, int b_stride,
                                       uint32_t *sse) {
  int diff_sum;
  unsigned int diff_sse;

  /* The asm helper produces both accumulators in a single pass. */
  vpx_filter_block2d_bil4x4_var_mmx(a, a_stride, b, b_stride,
                                    bilinear_filters_mmx[xoffset],
                                    bilinear_filters_mmx[yoffset],
                                    &diff_sum, &diff_sse);
  *sse = diff_sse;
  /* >> 4 divides by the 16 pixels of a 4x4 block. */
  return diff_sse - (((unsigned int)diff_sum * diff_sum) >> 4);
}
/*
 * 8x8 sub-pixel variance, MMX.
 *
 * Bilinearly filters the 8x8 block at eighth-pel phase (xoffset, yoffset)
 * and returns the variance of the filtered block against the second
 * block: sse - sum^2 / 64.  *sse receives the raw sum of squared
 * differences.
 */
uint32_t vpx_sub_pixel_variance8x8_mmx(const uint8_t *a, int a_stride,
                                       int xoffset, int yoffset,
                                       const uint8_t *b, int b_stride,
                                       uint32_t *sse) {
  int diff_sum;
  uint32_t diff_sse;

  /* One 8-wide column, 8 rows tall. */
  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &diff_sum, &diff_sse);
  *sse = diff_sse;
  /* >> 6 divides by the 64 pixels of an 8x8 block. */
  return diff_sse - (((uint32_t)diff_sum * diff_sum) >> 6);
}
/*
 * 16x16 sub-pixel variance, MMX.
 *
 * The MMX helper handles 8-wide columns, so the block is processed as two
 * independent 8x16 halves whose accumulators are then merged.  Returns
 * sse - sum^2 / 256; *sse receives the raw sum of squared differences.
 */
uint32_t vpx_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride,
                                         int xoffset, int yoffset,
                                         const uint8_t *b, int b_stride,
                                         uint32_t *sse) {
  int sum_left, sum_right, total_sum;
  unsigned int sse_left, sse_right, total_sse;

  /* Left 8-wide half. */
  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &sum_left, &sse_left);
  /* Right 8-wide half. */
  vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 16,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &sum_right, &sse_right);

  total_sum = sum_left + sum_right;
  total_sse = sse_left + sse_right;
  *sse = total_sse;
  /* >> 8 divides by the 256 pixels of a 16x16 block. */
  return total_sse - (((uint32_t)total_sum * total_sum) >> 8);
}
/*
 * 16x8 sub-pixel variance, MMX.
 *
 * Processed as two independent 8x8 halves (the MMX helper is 8 pixels
 * wide) whose accumulators are merged.  Returns sse - sum^2 / 128;
 * *sse receives the raw sum of squared differences.
 */
uint32_t vpx_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride,
                                        int xoffset, int yoffset,
                                        const uint8_t *b, int b_stride,
                                        uint32_t *sse) {
  int sum_left, sum_right, total_sum;
  unsigned int sse_left, sse_right, total_sse;

  /* Left 8-wide half. */
  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &sum_left, &sse_left);
  /* Right 8-wide half. */
  vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 8,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &sum_right, &sse_right);

  total_sum = sum_left + sum_right;
  total_sse = sse_left + sse_right;
  *sse = total_sse;
  /* >> 7 divides by the 128 pixels of a 16x8 block. */
  return total_sse - (((uint32_t)total_sum * total_sum) >> 7);
}
/*
 * 8x16 sub-pixel variance, MMX.
 *
 * A single 8-wide column, 16 rows tall.  Returns sse - sum^2 / 128;
 * *sse receives the raw sum of squared differences.
 */
uint32_t vpx_sub_pixel_variance8x16_mmx(const uint8_t *a, int a_stride,
                                        int xoffset, int yoffset,
                                        const uint8_t *b, int b_stride,
                                        uint32_t *sse) {
  int diff_sum;
  unsigned int diff_sse;

  vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16,
                                 bilinear_filters_mmx[xoffset],
                                 bilinear_filters_mmx[yoffset],
                                 &diff_sum, &diff_sse);
  *sse = diff_sse;
  /* >> 7 divides by the 128 pixels of an 8x16 block. */
  return diff_sse - (((uint32_t)diff_sum * diff_sum) >> 7);
}
/* 16x16 variance at a fixed horizontal half-pel offset.  Offset 4 of 8
 * in the eighth-pel filter table is exactly one half pel. */
uint32_t vpx_variance_halfpixvar16x16_h_mmx(const uint8_t *a, int a_stride,
                                            const uint8_t *b, int b_stride,
                                            uint32_t *sse) {
  const int kHalfPel = 4;
  return vpx_sub_pixel_variance16x16_mmx(a, a_stride, kHalfPel, 0, b, b_stride,
                                         sse);
}
/* 16x16 variance at a fixed vertical half-pel offset.  Offset 4 of 8
 * in the eighth-pel filter table is exactly one half pel. */
uint32_t vpx_variance_halfpixvar16x16_v_mmx(const uint8_t *a, int a_stride,
                                            const uint8_t *b, int b_stride,
                                            uint32_t *sse) {
  const int kHalfPel = 4;
  return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 0, kHalfPel, b, b_stride,
                                         sse);
}
/* 16x16 variance at fixed half-pel offsets in both directions.  Offset 4
 * of 8 in the eighth-pel filter table is exactly one half pel. */
uint32_t vpx_variance_halfpixvar16x16_hv_mmx(const uint8_t *a, int a_stride,
                                             const uint8_t *b, int b_stride,
                                             uint32_t *sse) {
  const int kHalfPel = 4;
  return vpx_sub_pixel_variance16x16_mmx(a, a_stride, kHalfPel, kHalfPel, b,
                                         b_stride, sse);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment