Commit a42df86c authored by Parag Salasakar

mips msa vp9 subpel variance optimization

average improvement ~3x-5x

Change-Id: I4cbba2711467b0e205904769ebbb4a1fcbb1a311
parent c96bb800
@@ -2058,5 +2058,43 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(3, 2, variance8x4_msa, 0),
make_tuple(2, 3, variance4x8_msa, 0),
make_tuple(2, 2, variance4x4_msa, 0)));
#if CONFIG_VP9_ENCODER
const SubpixVarMxNFunc subpel_variance4x4_msa = vp9_sub_pixel_variance4x4_msa;
const SubpixVarMxNFunc subpel_variance4x8_msa = vp9_sub_pixel_variance4x8_msa;
const SubpixVarMxNFunc subpel_variance8x4_msa = vp9_sub_pixel_variance8x4_msa;
const SubpixVarMxNFunc subpel_variance8x8_msa = vp9_sub_pixel_variance8x8_msa;
const SubpixVarMxNFunc subpel_variance8x16_msa = vp9_sub_pixel_variance8x16_msa;
const SubpixVarMxNFunc subpel_variance16x8_msa = vp9_sub_pixel_variance16x8_msa;
const SubpixVarMxNFunc subpel_variance16x16_msa =
vp9_sub_pixel_variance16x16_msa;
const SubpixVarMxNFunc subpel_variance16x32_msa =
vp9_sub_pixel_variance16x32_msa;
const SubpixVarMxNFunc subpel_variance32x16_msa =
vp9_sub_pixel_variance32x16_msa;
const SubpixVarMxNFunc subpel_variance32x32_msa =
vp9_sub_pixel_variance32x32_msa;
const SubpixVarMxNFunc subpel_variance32x64_msa =
vp9_sub_pixel_variance32x64_msa;
const SubpixVarMxNFunc subpel_variance64x32_msa =
vp9_sub_pixel_variance64x32_msa;
const SubpixVarMxNFunc subpel_variance64x64_msa =
vp9_sub_pixel_variance64x64_msa;
INSTANTIATE_TEST_CASE_P(
MSA, VP9SubpelVarianceTest,
::testing::Values(make_tuple(2, 2, subpel_variance4x4_msa, 0),
make_tuple(2, 3, subpel_variance4x8_msa, 0),
make_tuple(3, 2, subpel_variance8x4_msa, 0),
make_tuple(3, 3, subpel_variance8x8_msa, 0),
make_tuple(3, 4, subpel_variance8x16_msa, 0),
make_tuple(4, 3, subpel_variance16x8_msa, 0),
make_tuple(4, 4, subpel_variance16x16_msa, 0),
make_tuple(4, 5, subpel_variance16x32_msa, 0),
make_tuple(5, 4, subpel_variance32x16_msa, 0),
make_tuple(5, 5, subpel_variance32x32_msa, 0),
make_tuple(5, 6, subpel_variance32x64_msa, 0),
make_tuple(6, 5, subpel_variance64x32_msa, 0),
make_tuple(6, 6, subpel_variance64x64_msa, 0)));
#endif // CONFIG_VP9_ENCODER
#endif // HAVE_MSA
} // namespace
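A note on the instantiation above: each tuple encodes (log2(width), log2(height), function, bit depth). A minimal sketch of how the test fixture decodes one tuple (illustrative values only, not part of the test file):

/* Sketch: make_tuple(5, 6, subpel_variance32x64_msa, 0) */
const int log2w = 5, log2h = 6;
const int width = 1 << log2w;   /* 32 */
const int height = 1 << log2h;  /* 64 */
/* the trailing 0 selects 8-bit depth in these instantiations */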
@@ -798,80 +798,80 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
# variance
add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance64x64 avx2 neon/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance64x64 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x32 avx2 neon/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance32x32 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x32 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x16 neon/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance16x16 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x8 neon/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance8x8 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc";
# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form
add_proto qw/unsigned int vp9_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance4x8/, "$sse_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance4x8/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance4x4/, "$sse_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc";
#vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
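Each pair of specialize lines in this hunk shows the msa token being added to an existing specialization list. On a MIPS build with MSA enabled, the RTCD generator then maps the base name straight to the MSA kernel; a simplified sketch of the generated glue (an assumption about the machine-generated vp9_rtcd.h, which uses static #define dispatch on non-x86 targets):

/* Simplified sketch of the generated dispatch for one function. */
unsigned int vp9_sub_pixel_variance4x4_c(const uint8_t *src_ptr,
    int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr,
    int ref_stride, unsigned int *sse);
unsigned int vp9_sub_pixel_variance4x4_msa(const uint8_t *src_ptr,
    int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr,
    int ref_stride, unsigned int *sse);
#define vp9_sub_pixel_variance4x4 vp9_sub_pixel_variance4x4_msa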
......
/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
#include "vp9/common/vp9_filter.h"
#include "vp9/common/mips/msa/vp9_macros_msa.h"
DECLARE_ALIGNED(256, static const int8_t, vp9_bilinear_filters_msa[15][2]) = {
{ 120, 8 },
{ 112, 16 },
{ 104, 24 },
{ 96, 32 },
{ 88, 40 },
{ 80, 48 },
{ 72, 56 },
{ 64, 64 },
{ 56, 72 },
{ 48, 80 },
{ 40, 88 },
{ 32, 96 },
{ 24, 104 },
{ 16, 112 },
{ 8, 120 }
};
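The taps follow a closed form: row k-1 holds {128 - 8k, 8k} for subpel offset k in 1..15, so each pair sums to 1 << FILTER_BITS (128) and the rounding right-shift by FILTER_BITS renormalizes the filtered sum. A quick sketch (hypothetical helper, not part of the commit) that checks the table against that form:

#include <assert.h>
/* Hypothetical self-check: verify row k-1 holds {128 - 8k, 8k}. */
static void check_bilinear_taps_msa(void) {
  int k;
  for (k = 1; k < 16; ++k) {
    assert(vp9_bilinear_filters_msa[k - 1][0] == 128 - 8 * k);
    assert(vp9_bilinear_filters_msa[k - 1][1] == 8 * k);
  }
}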
#define CALC_MSE_AVG_B(src, ref, var, sub) { \
v16u8 src_l0_m, src_l1_m; \
v8i16 res_l0_m, res_l1_m; \
\
ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \
HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
\
sub += res_l0_m + res_l1_m; \
}
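In scalar terms, CALC_MSE_AVG_B accumulates the squared pixel differences into var and the signed differences into sub for the 16 byte lanes of a vector. A scalar sketch with hypothetical plain-array arguments:

/* Scalar equivalent sketch of CALC_MSE_AVG_B (hypothetical helper):
 * ILVRL_B2_UB pairs src/ref bytes, HSUB_UB2_SH forms signed 16-bit
 * differences, and DPADD_SH2_SW accumulates their squares into var. */
static void calc_mse_avg_ref(const uint8_t *src, const uint8_t *ref,
                             int64_t *var, int64_t *sub) {
  int i;
  for (i = 0; i < 16; ++i) {
    const int d = (int)src[i] - (int)ref[i];
    *var += d * d; /* running sum of squared differences (SSE) */
    *sub += d;     /* running signed sum of differences */
  }
}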
#define VARIANCE_WxH(sse, diff, shift) \
sse - (((uint32_t)diff * diff) >> shift)
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
sse - (((int64_t)diff * diff) >> shift)
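Both macros implement the usual variance identity variance = SSE - sum^2 / N, with N = w*h and shift = log2(N); the _LARGE_ variant widens to int64_t so diff * diff cannot overflow for 32x32 and larger blocks. A one-line sketch (hypothetical helper):

/* shift == log2(w*h), e.g. 8 for a 16x16 block. */
static uint32_t variance_ref(uint32_t sse, int32_t diff, uint32_t shift) {
  return sse - (uint32_t)(((int64_t)diff * diff) >> shift);
}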
static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
uint32_t ref0, ref1, ref2, ref3;
v16u8 filt0, ref = { 0 };
v16i8 src0, src1, src2, src3;
v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
v8u16 vec0, vec1, vec2, vec3;
v8u16 const255;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
const255 = (v8u16)__msa_ldi_h(255);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
vec0, vec1, vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
src0, src1, src2, src3);
ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
CALC_MSE_AVG_B(src0, ref, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
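The VSHF/DOTP/SRARI/MIN sequence above is a two-tap horizontal bilinear filter. Per pixel it computes the following (scalar sketch with a hypothetical helper; FILTER_BITS is 7 in vp9_filter.h):

/* Scalar sketch of the horizontal bilinear step (hypothetical helper). */
static uint8_t bilinear_h_pixel(const uint8_t *src, const int8_t *filter,
                                int x) {
  const int sum = filter[0] * src[x] + filter[1] * src[x + 1];
  const int out = (sum + 64) >> 7;          /* SRARI: round-shift by FILTER_BITS */
  return (uint8_t)(out > 255 ? 255 : out);  /* MIN_UH*: clamp to 8 bits */
}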
static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
v16u8 filt0, out, ref0, ref1, ref2, ref3;
v16i8 src0, src1, src2, src3;
v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
v8u16 vec0, vec1, vec2, vec3, const255;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
const255 = (v8u16)__msa_ldi_h(255);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
vec0, vec1, vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
src0, src1, src2, src3);
out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
CALC_MSE_AVG_B(out, ref0, var, avg);
out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2);
CALC_MSE_AVG_B(out, ref1, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
v16u8 dst0, dst1, dst2, dst3, filt0;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
v8u16 const255;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
const255 = (v8u16)__msa_ldi_h(255);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src2, src4, src6);
LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
dst += (4 * dst_stride);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
out0, out1, out2, out3);
DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0,
out4, out5, out6, out7);
SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
MIN_UH4_UH(out0, out1, out2, out3, const255);
MIN_UH4_UH(out4, out5, out6, out7, const255);
PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6,
src0, src1, src2, src3);
CALC_MSE_AVG_B(src0, dst0, var, avg);
CALC_MSE_AVG_B(src1, dst1, var, avg);
CALC_MSE_AVG_B(src2, dst2, var, avg);
CALC_MSE_AVG_B(src3, dst3, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t sub_pixel_sse_diff_32width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
uint32_t loop_cnt, sse = 0;
int32_t diff0[2];
for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
sse += sub_pixel_sse_diff_16width_h_msa(src, src_stride, dst, dst_stride,
filter, height, &diff0[loop_cnt]);
src += 16;
dst += 16;
}
*diff = diff0[0] + diff0[1];
return sse;
}
static uint32_t sub_pixel_sse_diff_64width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
uint32_t loop_cnt, sse = 0;
int32_t diff0[4];
for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
sse += sub_pixel_sse_diff_16width_h_msa(src, src_stride, dst, dst_stride,
filter, height, &diff0[loop_cnt]);
src += 16;
dst += 16;
}
*diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
return sse;
}
static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
uint32_t ref0, ref1, ref2, ref3;
v16u8 src0, src1, src2, src3, src4, out;
v16u8 src10_r, src32_r, src21_r, src43_r;
v16u8 ref = { 0 };
v16u8 src2110, src4332;
v16u8 filt0;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
v8u16 tmp0, tmp1;
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
src0 = LD_UB(src);
src += src_stride;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
src10_r, src21_r, src32_r, src43_r);
ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
SAT_UH2_UH(tmp0, tmp1, 7);
out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
CALC_MSE_AVG_B(out, ref, var, avg);
src0 = src4;
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
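The vertical path applies the same two-tap filter down a column, blending each row with the next; note how src0 carries the last row of one iteration into the next. A scalar sketch (hypothetical helper):

/* Scalar sketch of the vertical bilinear step (hypothetical helper). */
static uint8_t bilinear_v_pixel(const uint8_t *row0, const uint8_t *row1,
                                const int8_t *filter, int x) {
  const int sum = filter[0] * row0[x] + filter[1] * row1[x];
  const int out = (sum + 64) >> 7;          /* SRARI: round-shift by FILTER_BITS */
  return (uint8_t)(out > 255 ? 255 : out);  /* SAT_UH*: saturate to 8 bits */
}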
static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
v16u8 src0, src1, src2, src3, src4;
v16u8 ref0, ref1, ref2, ref3;
v8u16 vec0, vec1, vec2, vec3;
v8u16 tmp0, tmp1, tmp2, tmp3;
v16u8 filt0;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
src0 = LD_UB(src);
src += src_stride;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3,
vec0, vec1, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
tmp0, tmp1, tmp2, tmp3);
SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
SAT_UH4_UH(tmp0, tmp1, tmp2, tmp3, 7);
PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
src0 = src4;
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
v16u8 ref0, ref1, ref2, ref3;
v16u8 src0, src1, src2, src3, src4;
v16u8 out0, out1, out2, out3;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8u16 tmp0, tmp1, tmp2, tmp3;
v16u8 filt0;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
src0 = LD_UB(src);
src += src_stride;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_UB4(src, src_stride, src1, src2, src3, src4);
src += (4 * src_stride);
LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
SAT_UH2_UH(tmp0, tmp1, 7);
out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
SAT_UH2_UH(tmp2, tmp3, 7);
out1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
SAT_UH2_UH(tmp0, tmp1, 7);
out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
SAT_UH2_UH(tmp2, tmp3, 7);
out3 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
src0 = src4;
/* accumulate SSE and signed diff for the four filtered rows */
CALC_MSE_AVG_B(out0, ref0, var, avg);
CALC_MSE_AVG_B(out1, ref1, var, avg);
CALC_MSE_AVG_B(out2, ref2, var, avg);
CALC_MSE_AVG_B(out3, ref3, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t sub_pixel_sse_diff_32width_v_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
uint32_t loop_cnt, sse = 0;
int32_t diff0[2];
for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
sse += sub_pixel_sse_diff_16width_v_msa(src, src_stride, dst, dst_stride,
filter, height, &diff0[loop_cnt]);
src += 16;
dst += 16;
}
*diff = diff0[0] + diff0[1];
return sse;
}
static uint32_t sub_pixel_sse_diff_64width_v_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const int8_t *filter,
int32_t height,
int32_t *diff) {
uint32_t loop_cnt, sse = 0;
int32_t diff0[4];
for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
sse += sub_pixel_sse_diff_16width_v_msa(src, src_stride, dst, dst_stride,
filter, height, &diff0[loop_cnt]);
src += 16;
dst += 16;
}
*diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
return sse;
}