Commit ced98264 authored by James Zern

Revert "mips msa vp9 avg subpel variance optimization"

This reverts commit 61774ad1.

The reverted change causes MSA/VP9SubpelAvgVarianceTest.Ref failures under
mips32r5el-msa-linux-gnu and mips64r6el-msa-linux-gnu.

Change-Id: I7fb520c12b2a3b212d5e84b7619a380a48e49bb0
parent e7578084
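
For context, the failing "Ref" test runs each specialized function and the plain-C implementation over the same pseudo-random input and fails on any disagreement. A minimal sketch of that kind of check for a single 64x64 block (the helper name, buffer sizes, and the 0..15 offset sweep are illustrative assumptions, not the actual variance_test.cc harness; the function-pointer prototype follows the vp9_sub_pixel_avg_variance declarations in the diff below):

/*
 * Sketch only: compare an optimized averaged sub-pixel variance function
 * against the C reference on random data and report the first mismatch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int (*subpel_avg_var_fn)(const uint8_t *src_ptr,
                                          int source_stride, int xoffset,
                                          int yoffset, const uint8_t *ref_ptr,
                                          int ref_stride, unsigned int *sse,
                                          const uint8_t *second_pred);

static int check_against_reference(subpel_avg_var_fn c_fn,
                                   subpel_avg_var_fn opt_fn) {
  /* sub-pel filtering reads one row/column past the block, so the source
   * buffer carries an extra border */
  static uint8_t src[(64 + 1) * 64 + 1];
  static uint8_t ref[64 * 64];
  static uint8_t second_pred[64 * 64];
  int x, y, i;

  for (i = 0; i < (int)sizeof(src); ++i) src[i] = rand() & 0xff;
  for (i = 0; i < 64 * 64; ++i) {
    ref[i] = rand() & 0xff;
    second_pred[i] = rand() & 0xff;
  }

  for (x = 0; x < 16; ++x) {    /* horizontal sub-pel offsets (assumed 1/16-pel) */
    for (y = 0; y < 16; ++y) {  /* vertical sub-pel offsets */
      unsigned int sse_c, sse_opt;
      const unsigned int var_c =
          c_fn(src, 64, x, y, ref, 64, &sse_c, second_pred);
      const unsigned int var_opt =
          opt_fn(src, 64, x, y, ref, 64, &sse_opt, second_pred);
      if (var_c != var_opt || sse_c != sse_opt) {
        printf("mismatch at xoffset=%d yoffset=%d\n", x, y);
        return 1;
      }
    }
  }
  return 0;
}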
@@ -2095,47 +2095,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(5, 6, subpel_variance32x64_msa, 0),
make_tuple(6, 5, subpel_variance64x32_msa, 0),
make_tuple(6, 6, subpel_variance64x64_msa, 0)));
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x4_msa =
vp9_sub_pixel_avg_variance4x4_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance4x8_msa =
vp9_sub_pixel_avg_variance4x8_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x4_msa =
vp9_sub_pixel_avg_variance8x4_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x8_msa =
vp9_sub_pixel_avg_variance8x8_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance8x16_msa =
vp9_sub_pixel_avg_variance8x16_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x8_msa =
vp9_sub_pixel_avg_variance16x8_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x16_msa =
vp9_sub_pixel_avg_variance16x16_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance16x32_msa =
vp9_sub_pixel_avg_variance16x32_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x16_msa =
vp9_sub_pixel_avg_variance32x16_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x32_msa =
vp9_sub_pixel_avg_variance32x32_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance32x64_msa =
vp9_sub_pixel_avg_variance32x64_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x32_msa =
vp9_sub_pixel_avg_variance64x32_msa;
const vp9_subp_avg_variance_fn_t subpel_avg_variance64x64_msa =
vp9_sub_pixel_avg_variance64x64_msa;
INSTANTIATE_TEST_CASE_P(
MSA, VP9SubpelAvgVarianceTest,
::testing::Values(make_tuple(2, 2, subpel_avg_variance4x4_msa, 0),
make_tuple(2, 3, subpel_avg_variance4x8_msa, 0),
make_tuple(3, 2, subpel_avg_variance8x4_msa, 0),
make_tuple(3, 3, subpel_avg_variance8x8_msa, 0),
make_tuple(3, 4, subpel_avg_variance8x16_msa, 0),
make_tuple(4, 3, subpel_avg_variance16x8_msa, 0),
make_tuple(4, 4, subpel_avg_variance16x16_msa, 0),
make_tuple(4, 5, subpel_avg_variance16x32_msa, 0),
make_tuple(5, 4, subpel_avg_variance32x16_msa, 0),
make_tuple(5, 5, subpel_avg_variance32x32_msa, 0),
make_tuple(5, 6, subpel_avg_variance32x64_msa, 0),
make_tuple(6, 5, subpel_avg_variance64x32_msa, 0),
make_tuple(6, 6, subpel_avg_variance64x64_msa, 0)));
#endif // CONFIG_VP9_ENCODER
#endif // HAVE_MSA
} // namespace
@@ -808,81 +808,81 @@ add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr,
specialize qw/vp9_sub_pixel_variance64x64 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance64x64 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance32x64/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance64x32/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance32x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance16x32/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance32x32 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance32x32 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance32x32 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x16 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance16x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance8x16/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance16x8/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x8 neon msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x8 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance8x8/, "$sse2_x86inc", "$ssse3_x86inc";
# TODO(jingning): need to convert 8x4/4x8 functions into mmx/sse form
add_proto qw/unsigned int vp9_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance8x4/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance4x8/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc";
#vp9_sub_pixel_variance4x4_sse2=vp9_sub_pixel_variance4x4_wmt
add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_avg_8x8/, "const uint8_t *, int p";
specialize qw/vp9_avg_8x8 sse2 neon msa/;
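The hunk below removes the MSA kernels for the averaged sub-pixel variance. Loosely, each kernel bilinear-filters the source block at the requested sub-pel offset, averages the result with the compound prediction in second_pred, and returns the SSE along with the variance of the differences against ref. A simplified scalar sketch of that computation (the function name and explicit two-tap filter arguments are assumptions for illustration; libvpx's actual C fallback is organized differently, and it is the behaviour the MSA code must match bit-exactly):

/*
 * Sketch of one averaged sub-pixel variance block:
 *   1. 2-tap bilinear filter at the sub-pel offset, horizontal then vertical,
 *      with rounding after each pass (taps sum to 1 << SKETCH_FILTER_BITS),
 *   2. round-to-nearest average with second_pred (as __msa_aver_u_b does),
 *   3. accumulate SSE and the signed sum of differences against ref,
 *   4. variance = sse - sum^2 / (w * h); the VARIANCE_*WxH macros in the
 *      removed code express the division as a shift by log2(w * h).
 */
#include <stdint.h>

#define SKETCH_FILTER_BITS 7

static unsigned int subpel_avg_variance_sketch(
    const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
    const uint8_t *second_pred, const int16_t h_taps[2],
    const int16_t v_taps[2], int w, int h, unsigned int *sse) {
  const int round = 1 << (SKETCH_FILTER_BITS - 1);
  int64_t sum = 0;
  uint64_t sse64 = 0;
  int i, j;

  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const uint8_t *s = src + i * src_stride + j;
      /* horizontal 2-tap filter on the current row and the row below */
      const int top = (s[0] * h_taps[0] + s[1] * h_taps[1] + round) >>
                      SKETCH_FILTER_BITS;
      const int bot = (s[src_stride] * h_taps[0] +
                       s[src_stride + 1] * h_taps[1] + round) >>
                      SKETCH_FILTER_BITS;
      /* vertical 2-tap filter across the two intermediate rows */
      int pix = (top * v_taps[0] + bot * v_taps[1] + round) >>
                SKETCH_FILTER_BITS;
      int diff;
      /* compound average with the second predictor */
      pix = (pix + second_pred[i * w + j] + 1) >> 1;
      diff = pix - ref[i * ref_stride + j];
      sum += diff;
      sse64 += (int64_t)diff * diff;
    }
  }
  *sse = (unsigned int)sse64;
  return (unsigned int)(sse64 - (uint64_t)((sum * sum) / (w * h)));
}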
@@ -49,357 +49,6 @@ DECLARE_ALIGNED(256, static const int8_t, vp9_bilinear_filters_msa[15][2]) = {
#define VARIANCE_LARGE_WxH(sse, diff, shift) \
sse - (((int64_t)diff * diff) >> shift)
static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t height,
int32_t *diff) {
int32_t ht_cnt;
uint32_t src0, src1, src2, src3;
uint32_t ref0, ref1, ref2, ref3;
v16u8 pred, src = { 0 };
v16u8 ref = { 0 };
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = (height >> 2); ht_cnt--;) {
pred = LD_UB(sec_pred);
sec_pred += 16;
LW4(src_ptr, src_stride, src0, src1, src2, src3);
src_ptr += (4 * src_stride);
LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
ref_ptr += (4 * ref_stride);
INSERT_W4_UB(src0, src1, src2, src3, src);
INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
src = __msa_aver_u_b(src, pred);
CALC_MSE_AVG_B(src, ref, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t height,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src0, src1, src2, src3;
v16u8 ref0, ref1, ref2, ref3;
v16u8 pred0, pred1;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = (height >> 2); ht_cnt--;) {
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);
src_ptr += (4 * src_stride);
LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
ref_ptr += (4 * ref_stride);
PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
src0, src1, ref0, ref1);
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_16width_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t height,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src, ref, pred;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = (height >> 2); ht_cnt--;) {
pred = LD_UB(sec_pred);
sec_pred += 16;
src = LD_UB(src_ptr);
src_ptr += src_stride;
ref = LD_UB(ref_ptr);
ref_ptr += ref_stride;
src = __msa_aver_u_b(src, pred);
CALC_MSE_AVG_B(src, ref, var, avg);
pred = LD_UB(sec_pred);
sec_pred += 16;
src = LD_UB(src_ptr);
src_ptr += src_stride;
ref = LD_UB(ref_ptr);
ref_ptr += ref_stride;
src = __msa_aver_u_b(src, pred);
CALC_MSE_AVG_B(src, ref, var, avg);
pred = LD_UB(sec_pred);
sec_pred += 16;
src = LD_UB(src_ptr);
src_ptr += src_stride;
ref = LD_UB(ref_ptr);
ref_ptr += ref_stride;
src = __msa_aver_u_b(src, pred);
CALC_MSE_AVG_B(src, ref, var, avg);
pred = LD_UB(sec_pred);
sec_pred += 16;
src = LD_UB(src_ptr);
src_ptr += src_stride;
ref = LD_UB(ref_ptr);
ref_ptr += ref_stride;
src = __msa_aver_u_b(src, pred);
CALC_MSE_AVG_B(src, ref, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_32width_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t height,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src0, src1, ref0, ref1, pred0, pred1;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = (height >> 2); ht_cnt--;) {
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg);
CALC_MSE_AVG_B(src1, ref1, var, avg);
}
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_32x64_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src0, src1, ref0, ref1, pred0, pred1;
v8i16 avg0 = { 0 };
v8i16 avg1 = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = 16; ht_cnt--;) {
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
LD_UB2(sec_pred, 16, pred0, pred1);
sec_pred += 32;
LD_UB2(src_ptr, 16, src0, src1);
src_ptr += src_stride;
LD_UB2(ref_ptr, 16, ref0, ref1);
ref_ptr += ref_stride;
AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
}
vec = __msa_hadd_s_w(avg0, avg0);
vec += __msa_hadd_s_w(avg1, avg1);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src0, src1, src2, src3;
v16u8 ref0, ref1, ref2, ref3;
v16u8 pred0, pred1, pred2, pred3;
v8i16 avg0 = { 0 };
v8i16 avg1 = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = 16; ht_cnt--;) {
LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
sec_pred += 64;
LD_UB4(src_ptr, 16, src0, src1, src2, src3);
src_ptr += src_stride;
LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
ref_ptr += ref_stride;
AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
src0, src1, src2, src3);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src2, ref2, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
CALC_MSE_AVG_B(src3, ref3, var, avg1);
LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
sec_pred += 64;
LD_UB4(src_ptr, 16, src0, src1, src2, src3);
src_ptr += src_stride;
LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
ref_ptr += ref_stride;
AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
src0, src1, src2, src3);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src2, ref2, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
CALC_MSE_AVG_B(src3, ref3, var, avg1);
}
vec = __msa_hadd_s_w(avg0, avg0);
vec += __msa_hadd_s_w(avg1, avg1);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
int32_t src_stride,
const uint8_t *ref_ptr,
int32_t ref_stride,
const uint8_t *sec_pred,
int32_t *diff) {
int32_t ht_cnt;
v16u8 src0, src1, src2, src3;
v16u8 ref0, ref1, ref2, ref3;
v16u8 pred0, pred1, pred2, pred3;
v8i16 avg0 = { 0 };
v8i16 avg1 = { 0 };
v8i16 avg2 = { 0 };
v8i16 avg3 = { 0 };
v4i32 vec, var = { 0 };
for (ht_cnt = 32; ht_cnt--;) {
LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
sec_pred += 64;
LD_UB4(src_ptr, 16, src0, src1, src2, src3);
src_ptr += src_stride;
LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
ref_ptr += ref_stride;
AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
src0, src1, src2, src3);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
CALC_MSE_AVG_B(src2, ref2, var, avg2);
CALC_MSE_AVG_B(src3, ref3, var, avg3);
LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
sec_pred += 64;
LD_UB4(src_ptr, 16, src0, src1, src2, src3);
src_ptr += src_stride;
LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
ref_ptr += ref_stride;
AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,
src0, src1, src2, src3);
CALC_MSE_AVG_B(src0, ref0, var, avg0);
CALC_MSE_AVG_B(src1, ref1, var, avg1);
CALC_MSE_AVG_B(src2, ref2, var, avg2);
CALC_MSE_AVG_B(src3, ref3, var, avg3);
}
vec = __msa_hadd_s_w(avg0, avg0);
vec += __msa_hadd_s_w(avg1, avg1);
vec += __msa_hadd_s_w(avg2, avg2);
vec += __msa_hadd_s_w(avg3, avg3);
*diff = HADD_SW_S32(vec);
return HADD_SW_S32(var);
}
static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
@@ -748,6 +397,7 @@ static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src,
src0 = src4;
/* loop runs height/4 */
CALC_MSE_AVG_B(out0, ref0, var, avg);
CALC_MSE_AVG_B(out1, ref1, var, avg);
CALC_MSE_AVG_B(out2, ref2, var, avg);
@@ -1050,962 +700,77 @@ static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src,
return sse;
}
static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src,
int32_t src_stride,
const uint8_t *dst,
int32_t dst_stride,
const uint8_t *sec_pred,
const int8_t *filter,
int32_t height,
int32_t *diff) {
int16_t filtval;
uint32_t loop_cnt;
uint32_t ref0, ref1, ref2, ref3;
v16u8 out, pred, filt0, ref = { 0 };
v16i8 src0, src1, src2, src3;
v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
v8u16 vec0, vec1, vec2, vec3;
v8u16 const255;
v8i16 avg = { 0 };
v4i32 vec, var = { 0 };
filtval = LH(filter);
filt0 = (v16u8)__msa_fill_h(filtval);
const255 = (v8u16)__msa_ldi_h(255);
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
pred = LD_UB(sec_pred);
sec_pred += 16;
LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
dst += (4 * dst_stride);
INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0,
vec0, vec1, vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,
src0, src1, src2, src3);
ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
out = (v16u8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
out = __msa_aver_u_b(out, pred);
CALC_MSE_AVG_B(out, ref, var, avg);
}
#define VARIANCE_4Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 4);
#define VARIANCE_4Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 5);
#define VARIANCE_8Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 5);
#define VARIANCE_8Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 6);
#define VARIANCE_8Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 7);
#define VARIANCE_16Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 7);
#define VARIANCE_16Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 8);
vec = __msa_hadd_s_w(avg, avg);
*diff = HADD_SW_S32(vec);
#define VARIANCE_16Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
#define VARIANCE_32Wx16H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
#define VARIANCE_32Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 10);
#define VARIANCE_32Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
#define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
#define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
return HADD_SW_S32(var);
#define VP9_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
uint32_t vp9_sub_pixel_variance##wd##x##ht##_msa(const uint8_t *src, \
int32_t src_stride, \
int32_t xoffset, \
int32_t yoffset, \
const uint8_t *ref, \
int32_t ref_stride, \
uint32_t *sse) { \
int32_t diff; \
uint32_t var; \
const int8_t *h_filter = vp9_bilinear_filters_msa[xoffset - 1]; \
const int8_t *v_filter = vp9_bilinear_filters_msa[yoffset - 1]; \
\
if (yoffset) { \
if (xoffset) { \
*sse = sub_pixel_sse_diff_##wd##width_hv_msa(src, src_stride, \
ref, ref_stride, \
h_filter, v_filter, \
ht, &diff); \
} else { \
*sse = sub_pixel_sse_diff_##wd##width_v_msa(src, src_stride, \
ref, ref_stride, \
v_filter, ht, &diff); \
} \
\
var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
} else { \
if (xoffset) { \
*sse = sub_pixel_sse_diff_##wd##width_h_msa(src, src_stride, \
ref, ref_stride, \
h_filter, ht, &diff); \
\
var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
} else { \