Commit 0c0fd1e5 authored by Yi Luo's avatar Yi Luo

Lowbd rectangle V/H intra pred sse2 optimization

Function speedup sse2 v. C
Predictor  V_PRED  H_PRED
4x8        ~1.7x   ~1.8x
8x4        ~1.8x   ~2.2x
8x16       ~1.5x   ~1.4x
16x8       ~1.9x   ~1.3x
16x32      ~1.6x   ~1.4x
32x16      ~2.0x   ~1.9x

This patch disables speed tests to save Jenkins build
time. Developers can manually enable them by passing the
--gtest_also_run_disabled_tests flag on the test command line.

Change-Id: I81eaee5e8afc55275c7507c99774f78cc9e49f9a
parent fe809bd1
......@@ -113,29 +113,42 @@ specialize qw/aom_dc_128_predictor_16x16 neon msa sse2/;
specialize qw/aom_dc_128_predictor_16x32 sse2/;
specialize qw/aom_dc_128_predictor_32x16 sse2/;
specialize qw/aom_dc_128_predictor_32x32 msa neon sse2/;
specialize qw/aom_v_predictor_4x4 neon msa sse2/;
specialize qw/aom_v_predictor_4x8 sse2/;
specialize qw/aom_v_predictor_8x4 sse2/;
specialize qw/aom_v_predictor_8x8 neon msa sse2/;
specialize qw/aom_v_predictor_8x16 sse2/;
specialize qw/aom_v_predictor_16x8 sse2/;
specialize qw/aom_v_predictor_16x16 neon msa sse2/;
specialize qw/aom_v_predictor_16x32 sse2/;
specialize qw/aom_v_predictor_32x16 sse2/;
specialize qw/aom_v_predictor_32x32 neon msa sse2/;
specialize qw/aom_h_predictor_4x8 sse2/;
specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
specialize qw/aom_h_predictor_8x4 sse2/;
specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
specialize qw/aom_h_predictor_8x16 sse2/;
specialize qw/aom_h_predictor_16x8 sse2/;
specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
specialize qw/aom_h_predictor_16x32 sse2/;
specialize qw/aom_h_predictor_32x16 sse2/;
specialize qw/aom_h_predictor_32x32 neon msa sse2/;
specialize qw/aom_d63e_predictor_4x4 ssse3/;
specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
specialize qw/aom_d135_predictor_4x4 neon/;
specialize qw/aom_d153_predictor_4x4 ssse3/;
specialize qw/aom_v_predictor_4x4 neon msa sse2/;
specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
specialize qw/aom_dc_predictor_4x8 sse2/;
specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
specialize qw/aom_d153_predictor_8x8 ssse3/;
specialize qw/aom_v_predictor_8x8 neon msa sse2/;
specialize qw/aom_dc_predictor_8x4 sse2/;
specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
specialize qw/aom_dc_predictor_8x16 sse2/;
specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
specialize qw/aom_d153_predictor_16x16 ssse3/;
specialize qw/aom_v_predictor_16x16 neon msa sse2/;
specialize qw/aom_dc_predictor_16x8 sse2/;
specialize qw/aom_dc_predictor_16x16 dspr2 neon msa sse2/;
specialize qw/aom_dc_predictor_16x32 sse2/;
specialize qw/aom_h_predictor_32x32 neon msa sse2/;
specialize qw/aom_d153_predictor_32x32 ssse3/;
specialize qw/aom_v_predictor_32x32 neon msa sse2/;
specialize qw/aom_dc_predictor_32x16 sse2/;
specialize qw/aom_dc_predictor_32x32 msa neon sse2/;
......
......@@ -385,3 +385,300 @@ void aom_dc_128_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
const __m128i row = _mm_set1_epi8((uint8_t)128);
dc_store_32xh(&row, 16, dst, stride);
}
// -----------------------------------------------------------------------------
// V_PRED
// V_PRED for a 4x8 block: replicate the 4 above-neighbor pixels down
// every one of the 8 output rows. The left neighbors are unused.
void aom_v_predictor_4x8_sse2(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  (void)left;
  const uint32_t above_row = *(uint32_t *)above;
  dc_store_4x8(above_row, dst, stride);
}
// V_PRED for an 8x4 block: replicate the 8 above-neighbor pixels down
// all 4 output rows. The left neighbors are unused.
void aom_v_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  (void)left;
  const __m128i above_row = _mm_loadl_epi64((const __m128i *)above);
  dc_store_8xh(&above_row, 4, dst, stride);
}
// V_PRED for an 8x16 block: replicate the 8 above-neighbor pixels down
// all 16 output rows. The left neighbors are unused.
void aom_v_predictor_8x16_sse2(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  (void)left;
  const __m128i above_row = _mm_loadl_epi64((const __m128i *)above);
  dc_store_8xh(&above_row, 16, dst, stride);
}
// V_PRED for a 16x8 block: replicate the 16 above-neighbor pixels down
// all 8 output rows. The left neighbors are unused.
// NOTE(review): _mm_load_si128 assumes `above` is 16-byte aligned — this
// matches the aligned-edge convention used by the other predictors here.
void aom_v_predictor_16x8_sse2(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  (void)left;
  const __m128i above_row = _mm_load_si128((const __m128i *)above);
  dc_store_16xh(&above_row, 8, dst, stride);
}
// V_PRED for a 16x32 block: replicate the 16 above-neighbor pixels down
// all 32 output rows. The left neighbors are unused.
void aom_v_predictor_16x32_sse2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  (void)left;
  const __m128i above_row = _mm_load_si128((const __m128i *)above);
  dc_store_16xh(&above_row, 32, dst, stride);
}
// V_PRED for a 32x16 block: copy the 32 above-neighbor pixels (held in
// two 16-byte registers) into each of the 16 output rows.
// The left neighbors are unused.
void aom_v_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  (void)left;
  const __m128i above_lo = _mm_load_si128((const __m128i *)above);
  const __m128i above_hi = _mm_load_si128((const __m128i *)(above + 16));
  for (int r = 0; r < 16; ++r) {
    _mm_store_si128((__m128i *)dst, above_lo);
    _mm_store_si128((__m128i *)(dst + 16), above_hi);
    dst += stride;
  }
}
// -----------------------------------------------------------------------------
// H_PRED
// H_PRED for a 4x8 block: fill each output row with its corresponding
// left-neighbor pixel. The above neighbors are unused.
void aom_h_predictor_4x8_sse2(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  (void)above;
  // Duplicate each left pixel into a 16-bit lane: l7l7 l6l6 ... l0l0.
  __m128i lanes = _mm_loadl_epi64((__m128i const *)left);
  lanes = _mm_unpacklo_epi8(lanes, lanes);
  for (int half = 0; half < 2; ++half) {
    // Second pass shifts left[4..7] into the low four 16-bit lanes.
    if (half) lanes = _mm_unpackhi_epi64(lanes, lanes);
    *(uint32_t *)dst = _mm_cvtsi128_si32(_mm_shufflelo_epi16(lanes, 0));
    dst += stride;
    *(uint32_t *)dst = _mm_cvtsi128_si32(_mm_shufflelo_epi16(lanes, 0x55));
    dst += stride;
    *(uint32_t *)dst = _mm_cvtsi128_si32(_mm_shufflelo_epi16(lanes, 0xaa));
    dst += stride;
    *(uint32_t *)dst = _mm_cvtsi128_si32(_mm_shufflelo_epi16(lanes, 0xff));
    dst += stride;
  }
}
// H_PRED for an 8x4 block: fill each output row with its corresponding
// left-neighbor pixel (left[0..3]). The above neighbors are unused.
void aom_h_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  (void)above;
  // Duplicate each left pixel into a 16-bit lane, then broadcast one
  // lane per row with shufflelo.
  __m128i lanes = _mm_loadl_epi64((__m128i const *)left);
  lanes = _mm_unpacklo_epi8(lanes, lanes);
  _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(lanes, 0));
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(lanes, 0x55));
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(lanes, 0xaa));
  dst += stride;
  _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(lanes, 0xff));
}
// H_PRED for an 8x16 block: fill each of the 16 output rows with its
// corresponding left-neighbor pixel. The above neighbors are unused.
// NOTE(review): _mm_load_si128 assumes `left` is 16-byte aligned.
void aom_h_predictor_8x16_sse2(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  (void)above;
  const __m128i left16 = _mm_load_si128((__m128i const *)left);
  // Four registers, each holding one quarter of the left column
  // duplicated into the low four 16-bit lanes:
  //   quads[0] -> left[0..3], quads[1] -> left[4..7],
  //   quads[2] -> left[8..11], quads[3] -> left[12..15].
  __m128i quads[4];
  quads[0] = _mm_unpacklo_epi8(left16, left16);
  quads[1] = _mm_unpackhi_epi64(quads[0], quads[0]);
  quads[2] = _mm_unpackhi_epi8(left16, left16);
  quads[3] = _mm_unpackhi_epi64(quads[2], quads[2]);
  for (int q = 0; q < 4; ++q) {
    // Broadcast one 16-bit lane (a duplicated pixel pair) per row.
    _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(quads[q], 0));
    dst += stride;
    _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(quads[q], 0x55));
    dst += stride;
    _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(quads[q], 0xaa));
    dst += stride;
    _mm_storel_epi64((__m128i *)dst, _mm_shufflelo_epi16(quads[q], 0xff));
    dst += stride;
  }
}
// Store h precomputed 16-byte rows to dst, one per output line.
static INLINE void h_pred_store_16xh(const __m128i *row, int h, uint8_t *dst,
                                     ptrdiff_t stride) {
  for (int r = 0; r < h; ++r, dst += stride) {
    _mm_store_si128((__m128i *)dst, row[r]);
  }
}
// Broadcast each of the four low 16-bit lanes of *x (words 0..3, i.e.
// four duplicated left pixels) across a full row register.
static INLINE void repeat_low_4pixels(const __m128i *x, __m128i *row) {
  const __m128i r0 = _mm_shufflelo_epi16(*x, 0);
  const __m128i r1 = _mm_shufflelo_epi16(*x, 0x55);
  const __m128i r2 = _mm_shufflelo_epi16(*x, 0xaa);
  const __m128i r3 = _mm_shufflelo_epi16(*x, 0xff);
  row[0] = _mm_unpacklo_epi64(r0, r0);
  row[1] = _mm_unpacklo_epi64(r1, r1);
  row[2] = _mm_unpacklo_epi64(r2, r2);
  row[3] = _mm_unpacklo_epi64(r3, r3);
}
// Broadcast each of the four high 16-bit lanes of *x (words 4..7, i.e.
// four duplicated left pixels) across a full row register.
static INLINE void repeat_high_4pixels(const __m128i *x, __m128i *row) {
  const __m128i r4 = _mm_shufflehi_epi16(*x, 0);
  const __m128i r5 = _mm_shufflehi_epi16(*x, 0x55);
  const __m128i r6 = _mm_shufflehi_epi16(*x, 0xaa);
  const __m128i r7 = _mm_shufflehi_epi16(*x, 0xff);
  row[0] = _mm_unpackhi_epi64(r4, r4);
  row[1] = _mm_unpackhi_epi64(r5, r5);
  row[2] = _mm_unpackhi_epi64(r6, r6);
  row[3] = _mm_unpackhi_epi64(r7, r7);
}
// Emit the first 4 rows of a 16-wide H_PRED block using the first 8
// bytes of the duplicated-left register: xxxxxxxx33221100.
static INLINE void h_prediction_16x8_1(const __m128i *left, uint8_t *dst,
                                       ptrdiff_t stride) {
  __m128i rows[4];
  repeat_low_4pixels(left, rows);
  h_pred_store_16xh(rows, 4, dst, stride);
}
// Emit the second 4 rows of a 16-wide H_PRED block using the second 8
// bytes of the duplicated-left register: 77665544xxxxxxxx.
static INLINE void h_prediction_16x8_2(const __m128i *left, uint8_t *dst,
                                       ptrdiff_t stride) {
  __m128i rows[4];
  repeat_high_4pixels(left, rows);
  h_pred_store_16xh(rows, 4, dst, stride);
}
// H_PRED for a 16x8 block: fill each output row with its corresponding
// left-neighbor pixel. The above neighbors are unused.
void aom_h_predictor_16x8_sse2(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  (void)above;
  const __m128i left8 = _mm_loadl_epi64((const __m128i *)left);
  const __m128i left_dup = _mm_unpacklo_epi8(left8, left8);
  h_prediction_16x8_1(&left_dup, dst, stride);
  h_prediction_16x8_2(&left_dup, dst + (stride << 2), stride);
}
// H_PRED for a 16x32 block: two passes of 16 rows, each pass consuming
// 16 left-neighbor pixels. The above neighbors are unused.
// NOTE(review): _mm_load_si128 assumes `left` is 16-byte aligned.
void aom_h_predictor_16x32_sse2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  (void)above;
  for (int pass = 0; pass < 2; ++pass) {
    const __m128i left16 = _mm_load_si128((const __m128i *)left);
    __m128i left_dup = _mm_unpacklo_epi8(left16, left16);
    h_prediction_16x8_1(&left_dup, dst, stride);
    dst += stride << 2;
    h_prediction_16x8_2(&left_dup, dst, stride);
    dst += stride << 2;
    left_dup = _mm_unpackhi_epi8(left16, left16);
    h_prediction_16x8_1(&left_dup, dst, stride);
    dst += stride << 2;
    h_prediction_16x8_2(&left_dup, dst, stride);
    dst += stride << 2;
    left += 16;
  }
}
// Store h precomputed rows to a 32-wide destination; each row register
// is written twice (bytes 0-15 and 16-31) to span the full width.
static INLINE void h_pred_store_32xh(const __m128i *row, int h, uint8_t *dst,
                                     ptrdiff_t stride) {
  for (int r = 0; r < h; ++r, dst += stride) {
    _mm_store_si128((__m128i *)dst, row[r]);
    _mm_store_si128((__m128i *)(dst + 16), row[r]);
  }
}
// Emit the first 4 rows of a 32-wide H_PRED block using the first 8
// bytes of the duplicated-left register: xxxxxxxx33221100.
static INLINE void h_prediction_32x8_1(const __m128i *left, uint8_t *dst,
                                       ptrdiff_t stride) {
  __m128i rows[4];
  repeat_low_4pixels(left, rows);
  h_pred_store_32xh(rows, 4, dst, stride);
}
// Emit the second 4 rows of a 32-wide H_PRED block using the second 8
// bytes of the duplicated-left register: 77665544xxxxxxxx.
static INLINE void h_prediction_32x8_2(const __m128i *left, uint8_t *dst,
                                       ptrdiff_t stride) {
  __m128i rows[4];
  repeat_high_4pixels(left, rows);
  h_pred_store_32xh(rows, 4, dst, stride);
}
// H_PRED for a 32x16 block: fill each of the 16 output rows with its
// corresponding left-neighbor pixel. The above neighbors are unused.
// NOTE(review): _mm_load_si128 assumes `left` is 16-byte aligned.
void aom_h_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  (void)above;
  const __m128i left16 = _mm_load_si128((const __m128i *)left);
  __m128i left_dup = _mm_unpacklo_epi8(left16, left16);
  h_prediction_32x8_1(&left_dup, dst, stride);
  dst += stride << 2;
  h_prediction_32x8_2(&left_dup, dst, stride);
  dst += stride << 2;
  left_dup = _mm_unpackhi_epi8(left16, left16);
  h_prediction_32x8_1(&left_dup, dst, stride);
  dst += stride << 2;
  h_prediction_32x8_2(&left_dup, dst, stride);
}
......@@ -216,8 +216,9 @@ INSTANTIATE_TEST_CASE_P(SSE2_TO_C_12, HighbdIntraPredTest,
#if HAVE_SSE2
const IntraPredFunc<IntraPred> LowbdIntraPredTestVector[] = {
lowbd_intrapred(dc, sse2), lowbd_intrapred(dc_top, sse2),
lowbd_intrapred(dc, sse2), lowbd_intrapred(dc_top, sse2),
lowbd_intrapred(dc_left, sse2), lowbd_intrapred(dc_128, sse2),
lowbd_intrapred(v, sse2), lowbd_intrapred(h, sse2),
};
INSTANTIATE_TEST_CASE_P(SSE2, LowbdIntraPredTest,
......
......@@ -370,7 +370,7 @@ void TestIntraPred32(const char *block_name, AvxPredFunc const *pred_funcs) {
#define INTRA_PRED_TEST(arch, test_func, blk, dc, dc_left, dc_top, dc_128, v, \
h, d45e, d135, d117, d153, d207e, d63e, tm, smooth, \
smooth_v, smooth_h) \
TEST(arch, test_func) { \
TEST(arch, DISABLED_##test_func) { \
static const AvxPredFunc aom_intra_pred[] = { \
dc, dc_left, dc_top, dc_128, v, h, d45e, d135, \
d117, d153, d207e, d63e, tm, smooth, smooth_v, smooth_h \
......@@ -431,8 +431,9 @@ INTRA_PRED_TEST(SSE2_1, TestIntraPred4, "intra4x4", aom_dc_predictor_4x4_sse2,
NULL, NULL, NULL, NULL)
INTRA_PRED_TEST(SSE2_2, TestIntraPred4, "intra4x8", aom_dc_predictor_4x8_sse2,
aom_dc_left_predictor_4x8_sse2, aom_dc_top_predictor_4x8_sse2,
aom_dc_128_predictor_4x8_sse2, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL)
aom_dc_128_predictor_4x8_sse2, aom_v_predictor_4x8_sse2,
aom_h_predictor_4x8_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL)
#endif // HAVE_SSE2
#if HAVE_SSSE3
......@@ -531,12 +532,14 @@ INTRA_PRED_TEST(SSE2_1, TestIntraPred8, "intra8x8", aom_dc_predictor_8x8_sse2,
NULL, NULL, NULL, NULL)
INTRA_PRED_TEST(SSE2_2, TestIntraPred8, "intra8x4", aom_dc_predictor_8x4_sse2,
aom_dc_left_predictor_8x4_sse2, aom_dc_top_predictor_8x4_sse2,
aom_dc_128_predictor_8x4_sse2, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL)
aom_dc_128_predictor_8x4_sse2, aom_v_predictor_8x4_sse2,
aom_h_predictor_8x4_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL)
INTRA_PRED_TEST(SSE2_3, TestIntraPred8, "intra8x16", aom_dc_predictor_8x16_sse2,
aom_dc_left_predictor_8x16_sse2, aom_dc_top_predictor_8x16_sse2,
aom_dc_128_predictor_8x16_sse2, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL)
aom_dc_128_predictor_8x16_sse2, aom_v_predictor_8x16_sse2,
aom_h_predictor_8x16_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL)
#endif // HAVE_SSE2
#if HAVE_SSSE3
......@@ -637,18 +640,14 @@ INTRA_PRED_TEST(SSE2_1, TestIntraPred16, "intra16x16",
INTRA_PRED_TEST(SSE2_2, TestIntraPred16, "intra16x8",
aom_dc_predictor_16x8_sse2, aom_dc_left_predictor_16x8_sse2,
aom_dc_top_predictor_16x8_sse2, aom_dc_128_predictor_16x8_sse2,
// aom_v_predictor_16x8_sse2,
// aom_h_predictor_16x8_sse2,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL)
aom_v_predictor_16x8_sse2, aom_h_predictor_16x8_sse2, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
INTRA_PRED_TEST(SSE2_3, TestIntraPred16, "intra16x32",
aom_dc_predictor_16x32_sse2, aom_dc_left_predictor_16x32_sse2,
aom_dc_top_predictor_16x32_sse2,
aom_dc_128_predictor_16x32_sse2,
// aom_v_predictor_16x32_sse2,
// aom_h_predictor_16x32_sse2,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL)
aom_dc_128_predictor_16x32_sse2, aom_v_predictor_16x32_sse2,
aom_h_predictor_16x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL)
#endif // HAVE_SSE2
#if HAVE_SSSE3
......@@ -732,11 +731,9 @@ INTRA_PRED_TEST(SSE2_1, TestIntraPred32, "intra32x32",
INTRA_PRED_TEST(SSE2_2, TestIntraPred32, "intra32x16",
aom_dc_predictor_32x16_sse2, aom_dc_left_predictor_32x16_sse2,
aom_dc_top_predictor_32x16_sse2,
aom_dc_128_predictor_32x16_sse2,
// aom_v_predictor_32x16_sse2,
// aom_h_predictor_32x16_sse2,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL)
aom_dc_128_predictor_32x16_sse2, aom_v_predictor_32x16_sse2,
aom_h_predictor_32x16_sse2, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL)
#endif // HAVE_SSE2
#if HAVE_SSSE3
......@@ -1056,7 +1053,7 @@ void TestHighbdIntraPred32(const char *block_name,
#define HIGHBD_INTRA_PRED_TEST(arch, test_func, block_size, dc, dc_left, \
dc_top, dc_128, v, h, d45e, d135, d117, d153, \
d207e, d63e, tm, smooth, smooth_v, smooth_h) \
TEST(arch, test_func) { \
TEST(arch, DISABLED_##test_func) { \
static const AvxHighbdPredFunc aom_intra_pred[] = { \
dc, dc_left, dc_top, dc_128, v, h, d45e, d135, \
d117, d153, d207e, d63e, tm, smooth, smooth_v, smooth_h \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment