Commit ae676953 authored by Yi Luo

Lowbd D207E/D63E/D45E intrapred x86 optimization

D207E
Predictor  SSE2 vs C
4x4        ~2.6X
4x8        ~2.5X
8x4        ~8.0X
8x8        ~9.1X
8x16       ~11.7X
16x8       ~16.9X
16x16      ~17.3X
16x32      ~17.2X
32x16      ~30.2X
32x32      ~35.5X

D63E
Predictor  SSE2 vs C
4x4        ~4.7X
4x8        ~4.9X
8x4        ~7.8X
8x8        ~8.9X
8x16       ~9.3X
16x8       ~15.7X
16x16      ~14.7X
16x32      ~17.3X
32x16      ~18.0X
32x32      ~15.7X

D45E
Predictor  SSSE3 vs C
4x4        ~1.8X
4x8        ~2.9X
8x4        ~6.7X
8x8        ~6.5X
8x16       ~7.4X
16x8       ~24.4X
16x16      ~21.5X
16x32      ~24.2X
32x16      ~25.4X
32x32      ~25.2X

Change-Id: I8215de190e2b6314272749761600e389d1ca0fdf
parent 08ee5c86
@@ -163,7 +163,6 @@ specialize qw/aom_smooth_predictor_16x32 ssse3/;
specialize qw/aom_smooth_predictor_32x16 ssse3/;
specialize qw/aom_smooth_predictor_32x32 ssse3/;
specialize qw/aom_d63e_predictor_4x4 ssse3/;
specialize qw/aom_d135_predictor_4x4 neon/;
specialize qw/aom_d153_predictor_4x4 ssse3/;
specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
@@ -180,6 +179,38 @@ specialize qw/aom_d153_predictor_32x32 ssse3/;
specialize qw/aom_dc_predictor_32x16 sse2 avx2/;
specialize qw/aom_dc_predictor_32x32 msa neon sse2 avx2/;
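# Note: each specialize line below registers an optimized variant of the C
# predictor (e.g. aom_d207e_predictor_4x4_sse2); the generated RTCD header
# selects that variant over the C version when the build target or runtime
# CPU supports the listed instruction set.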
specialize qw/aom_d207e_predictor_4x4 sse2/;
specialize qw/aom_d207e_predictor_4x8 sse2/;
specialize qw/aom_d207e_predictor_8x4 sse2/;
specialize qw/aom_d207e_predictor_8x8 sse2/;
specialize qw/aom_d207e_predictor_8x16 sse2/;
specialize qw/aom_d207e_predictor_16x8 sse2/;
specialize qw/aom_d207e_predictor_16x16 sse2/;
specialize qw/aom_d207e_predictor_16x32 sse2/;
specialize qw/aom_d207e_predictor_32x16 sse2/;
specialize qw/aom_d207e_predictor_32x32 sse2/;
specialize qw/aom_d63e_predictor_4x4 sse2 ssse3/;
specialize qw/aom_d63e_predictor_4x8 sse2/;
specialize qw/aom_d63e_predictor_8x4 sse2/;
specialize qw/aom_d63e_predictor_8x8 sse2/;
specialize qw/aom_d63e_predictor_8x16 sse2/;
specialize qw/aom_d63e_predictor_16x8 sse2/;
specialize qw/aom_d63e_predictor_16x16 sse2/;
specialize qw/aom_d63e_predictor_16x32 sse2/;
specialize qw/aom_d63e_predictor_32x16 sse2/;
specialize qw/aom_d63e_predictor_32x32 sse2/;
specialize qw/aom_d45e_predictor_4x4 ssse3/;
specialize qw/aom_d45e_predictor_4x8 ssse3/;
specialize qw/aom_d45e_predictor_8x4 ssse3/;
specialize qw/aom_d45e_predictor_8x8 ssse3/;
specialize qw/aom_d45e_predictor_8x16 ssse3/;
specialize qw/aom_d45e_predictor_16x8 ssse3/;
specialize qw/aom_d45e_predictor_16x16 ssse3/;
specialize qw/aom_d45e_predictor_16x32 ssse3/;
specialize qw/aom_d45e_predictor_32x16 ssse3/;
specialize qw/aom_d45e_predictor_32x32 ssse3/;
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
specialize qw/aom_highbd_v_predictor_4x4 sse2/;
@@ -682,3 +682,554 @@ void aom_h_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
dst += stride << 2;
h_prediction_32x8_2(&left_col_8p, dst, stride);
}
// -----------------------------------------------------------------------------
// D207E_PRED
/*
; ------------------------------------------
; input: x, y, z, result
;
; trick from pascal
; (x+2y+z+2)>>2 can be calculated as:
; result = avg(x,z)
; result -= xor(x,z) & 1
; result = avg(result,y)
; ------------------------------------------
*/
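/*
; Illustrative check of the trick above (values chosen arbitrarily):
; x = 10, y = 20, z = 11 -> exact: (10 + 2*20 + 11 + 2) >> 2 = 63 >> 2 = 15
;   avg(10, 11)          = 11
;   11 - ((10 ^ 11) & 1) = 10   (i.e. the truncating average (x + z) >> 1)
;   avg(10, 20)          = 15
; Subtracting the xor bit converts the rounded avg(x, z) into a truncating
; average, so the final avg() reproduces the (x + 2y + z + 2) >> 2 rounding.
*/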
static INLINE __m128i avg3_epu8(const __m128i *x, const __m128i *y,
const __m128i *z) {
const __m128i one = _mm_set1_epi8(1);
const __m128i a = _mm_avg_epu8(*x, *z);
__m128i b = _mm_sub_epi8(a, _mm_and_si128(_mm_xor_si128(*x, *z), one));
return _mm_avg_epu8(b, *y);
}
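// In effect, d207e_4x4 computes, for each destination row r
// (AVG2(a, b) = (a + b + 1) >> 1, AVG3(a, b, c) = (a + 2 * b + c + 2) >> 2):
//   dst[c] = AVG2(left[r + c / 2], left[r + c / 2 + 1])                       for even c
//   dst[c] = AVG3(left[r + c / 2], left[r + c / 2 + 1], left[r + c / 2 + 2])  for odd c
// y0/y1 hold the 2-tap averages, u0/u1 the 3-tap averages, and the byte
// interleave (unpacklo) places rows 0 and 2 in v0, rows 1 and 3 in v1.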
static INLINE void d207e_4x4(const uint8_t *left, uint8_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadl_epi64((const __m128i *)left);
const __m128i x1 = _mm_srli_si128(x0, 1);
const __m128i x2 = _mm_srli_si128(x0, 2);
const __m128i x3 = _mm_srli_si128(x0, 3);
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
const __m128i v0 = _mm_unpacklo_epi8(y0, u0);
const __m128i v1 = _mm_unpacklo_epi8(y1, u1);
*(uint32_t *)*dst = _mm_cvtsi128_si32(v0);
*dst += stride;
*(uint32_t *)*dst = _mm_cvtsi128_si32(v1);
*dst += stride;
*(uint32_t *)*dst = _mm_cvtsi128_si32(_mm_srli_si128(v0, 4));
*dst += stride;
*(uint32_t *)*dst = _mm_cvtsi128_si32(_mm_srli_si128(v1, 4));
*dst += stride;
}
void aom_d207e_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_4x4(left, &dst, stride);
}
void aom_d207e_predictor_4x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_4x4(left, &dst, stride);
d207e_4x4(left + 4, &dst, stride);
}
static INLINE void d207e_8x4(const uint8_t *left, uint8_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadl_epi64((const __m128i *)left);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(left + 3));
const __m128i x4 = _mm_loadl_epi64((const __m128i *)(left + 4));
const __m128i x5 = _mm_loadl_epi64((const __m128i *)(left + 5));
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i y2 = _mm_avg_epu8(x2, x3);
const __m128i y3 = _mm_avg_epu8(x3, x4);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
const __m128i u2 = avg3_epu8(&x2, &x3, &x4);
const __m128i u3 = avg3_epu8(&x3, &x4, &x5);
_mm_storel_epi64((__m128i *)*dst, _mm_unpacklo_epi8(y0, u0));
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, _mm_unpacklo_epi8(y1, u1));
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, _mm_unpacklo_epi8(y2, u2));
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, _mm_unpacklo_epi8(y3, u3));
*dst += stride;
}
void aom_d207e_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_8x4(left, &dst, stride);
}
void aom_d207e_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_8x4(left, &dst, stride);
d207e_8x4(left + 4, &dst, stride);
}
void aom_d207e_predictor_8x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_8x4(left, &dst, stride);
d207e_8x4(left + 4, &dst, stride);
d207e_8x4(left + 8, &dst, stride);
d207e_8x4(left + 12, &dst, stride);
}
static INLINE void d207e_16x4(const uint8_t *left, uint8_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadu_si128((const __m128i *)left);
const __m128i x1 = _mm_loadu_si128((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(left + 3));
const __m128i x4 = _mm_loadu_si128((const __m128i *)(left + 4));
const __m128i x5 = _mm_loadu_si128((const __m128i *)(left + 5));
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i y2 = _mm_avg_epu8(x2, x3);
const __m128i y3 = _mm_avg_epu8(x3, x4);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
const __m128i u2 = avg3_epu8(&x2, &x3, &x4);
const __m128i u3 = avg3_epu8(&x3, &x4, &x5);
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y0, u0));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y1, u1));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y2, u2));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y3, u3));
*dst += stride;
}
void aom_d207e_predictor_16x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_16x4(left, &dst, stride);
d207e_16x4(left + 4, &dst, stride);
}
void aom_d207e_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
d207e_16x4(left, &dst, stride);
d207e_16x4(left + 4, &dst, stride);
d207e_16x4(left + 8, &dst, stride);
d207e_16x4(left + 12, &dst, stride);
}
void aom_d207e_predictor_16x32_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
int i;
for (i = 0; i < 32; i += 4) {
d207e_16x4(left + i, &dst, stride);
}
}
static INLINE void d207e_32x4(const uint8_t *left, uint8_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadu_si128((const __m128i *)left);
const __m128i x1 = _mm_loadu_si128((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(left + 3));
const __m128i x4 = _mm_loadu_si128((const __m128i *)(left + 4));
const __m128i x5 = _mm_loadu_si128((const __m128i *)(left + 5));
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i y2 = _mm_avg_epu8(x2, x3);
const __m128i y3 = _mm_avg_epu8(x3, x4);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
const __m128i u2 = avg3_epu8(&x2, &x3, &x4);
const __m128i u3 = avg3_epu8(&x3, &x4, &x5);
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y0, u0));
_mm_store_si128((__m128i *)(*dst + 16), _mm_unpackhi_epi8(y0, u0));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y1, u1));
_mm_store_si128((__m128i *)(*dst + 16), _mm_unpackhi_epi8(y1, u1));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y2, u2));
_mm_store_si128((__m128i *)(*dst + 16), _mm_unpackhi_epi8(y2, u2));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi8(y3, u3));
_mm_store_si128((__m128i *)(*dst + 16), _mm_unpackhi_epi8(y3, u3));
*dst += stride;
}
void aom_d207e_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
int i;
for (i = 0; i < 16; i += 4) {
d207e_32x4(left + i, &dst, stride);
}
}
void aom_d207e_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)above;
int i;
for (i = 0; i < 32; i += 4) {
d207e_32x4(left + i, &dst, stride);
}
}
// -----------------------------------------------------------------------------
// D63E_PRED
#define D63E_STORE_4X4 \
do { \
*(uint32_t *)dst = _mm_cvtsi128_si32(y0); \
dst += stride; \
*(uint32_t *)dst = _mm_cvtsi128_si32(u0); \
dst += stride; \
*(uint32_t *)dst = _mm_cvtsi128_si32(y1); \
dst += stride; \
*(uint32_t *)dst = _mm_cvtsi128_si32(u1); \
dst += stride; \
} while (0)
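// In the D63E output, even rows are 2-tap and odd rows are 3-tap averages of
// the above row, with the source offset advancing by one sample every two
// rows:
//   dst[c] = AVG2(above[r / 2 + c], above[r / 2 + c + 1])                          for even r
//   dst[c] = AVG3(above[r / 2 + c], above[r / 2 + c + 1], above[r / 2 + c + 2])    for odd r
// which is why D63E_STORE_4X4 writes the rows in y0, u0, y1, u1 order.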
void aom_d63e_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
const __m128i x0 = _mm_loadl_epi64((const __m128i *)above);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_4X4;
}
void aom_d63e_predictor_4x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_loadl_epi64((const __m128i *)above);
__m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_4X4;
x0 = _mm_loadl_epi64((const __m128i *)(above + 4));
x1 = _mm_loadl_epi64((const __m128i *)(above + 5));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_4X4;
}
#define D63E_STORE_8X4 \
do { \
_mm_storel_epi64((__m128i *)dst, y0); \
dst += stride; \
_mm_storel_epi64((__m128i *)dst, u0); \
dst += stride; \
_mm_storel_epi64((__m128i *)dst, y1); \
dst += stride; \
_mm_storel_epi64((__m128i *)dst, u1); \
dst += stride; \
} while (0)
void aom_d63e_predictor_8x4_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
const __m128i x0 = _mm_loadl_epi64((const __m128i *)above);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
const __m128i y0 = _mm_avg_epu8(x0, x1);
const __m128i y1 = _mm_avg_epu8(x1, x2);
const __m128i u0 = avg3_epu8(&x0, &x1, &x2);
const __m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_8X4;
}
void aom_d63e_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_loadl_epi64((const __m128i *)above);
__m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_8X4;
x0 = _mm_loadl_epi64((const __m128i *)(above + 4));
x1 = _mm_loadl_epi64((const __m128i *)(above + 5));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_8X4;
}
void aom_d63e_predictor_8x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_loadl_epi64((const __m128i *)above);
__m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
int i = 2;
do {
__m128i x2 = _mm_loadl_epi64((const __m128i *)(above + i++));
__m128i x3 = _mm_loadl_epi64((const __m128i *)(above + i++));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_8X4;
x0 = _mm_loadl_epi64((const __m128i *)(above + i++));
x1 = _mm_loadl_epi64((const __m128i *)(above + i++));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_8X4;
} while (i < 10);
}
#define D63E_STORE_16X4 \
do { \
_mm_store_si128((__m128i *)dst, y0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, y1); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u1); \
dst += stride; \
} while (0)
void aom_d63e_predictor_16x8_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_load_si128((const __m128i *)above);
__m128i x1 = _mm_loadu_si128((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(above + 3));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_16X4;
x0 = _mm_loadu_si128((const __m128i *)(above + 4));
x1 = _mm_loadu_si128((const __m128i *)(above + 5));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_16X4;
}
void aom_d63e_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_load_si128((const __m128i *)above);
__m128i x1 = _mm_loadu_si128((const __m128i *)(above + 1));
int i = 2;
do {
__m128i x2 = _mm_loadu_si128((const __m128i *)(above + i++));
__m128i x3 = _mm_loadu_si128((const __m128i *)(above + i++));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_16X4;
x0 = _mm_loadu_si128((const __m128i *)(above + i++));
x1 = _mm_loadu_si128((const __m128i *)(above + i++));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_16X4;
} while (i < 10);
}
void aom_d63e_predictor_16x32_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
__m128i x0 = _mm_load_si128((const __m128i *)above);
__m128i x1 = _mm_loadu_si128((const __m128i *)(above + 1));
int i = 2;
do {
__m128i x2 = _mm_loadu_si128((const __m128i *)(above + i++));
__m128i x3 = _mm_loadu_si128((const __m128i *)(above + i++));
__m128i y0 = _mm_avg_epu8(x0, x1);
__m128i y1 = _mm_avg_epu8(x1, x2);
__m128i u0 = avg3_epu8(&x0, &x1, &x2);
__m128i u1 = avg3_epu8(&x1, &x2, &x3);
D63E_STORE_16X4;
x0 = _mm_loadu_si128((const __m128i *)(above + i++));
x1 = _mm_loadu_si128((const __m128i *)(above + i++));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
D63E_STORE_16X4;
} while (i < 18);
}
#define D63E_STORE_32X4 \
do { \
_mm_store_si128((__m128i *)dst, y0); \
_mm_store_si128((__m128i *)(dst + 16), z0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u0); \
_mm_store_si128((__m128i *)(dst + 16), v0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, y1); \
_mm_store_si128((__m128i *)(dst + 16), z1); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u1); \
_mm_store_si128((__m128i *)(dst + 16), v1); \
dst += stride; \
} while (0)
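// d63e_w32 handles 32-wide blocks in two 16-byte halves: x0..x3 cover columns
// 0-15 and a0..a3 cover columns 16-31. Each loop iteration emits eight rows
// (two D63E_STORE_32X4 stores), and count = num / 2 + 2 makes the loop run
// num / 8 times, producing the requested num rows.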
static INLINE void d63e_w32(const uint8_t *above, uint8_t *dst,
ptrdiff_t stride, int num) {
__m128i x0, x1, x2, x3, a0, a1, a2, a3;
__m128i y0, y1, u0, u1, z0, z1, v0, v1;
const int count = (num >> 1) + 2;
x0 = _mm_load_si128((const __m128i *)above);
x1 = _mm_loadu_si128((const __m128i *)(above + 1));
a0 = _mm_loadu_si128((const __m128i *)(above + 16));
a1 = _mm_loadu_si128((const __m128i *)(above + 16 + 1));
int i = 2;
do {
x2 = _mm_loadu_si128((const __m128i *)(above + i));
a2 = _mm_loadu_si128((const __m128i *)(above + 16 + i++));
x3 = _mm_loadu_si128((const __m128i *)(above + i));
a3 = _mm_loadu_si128((const __m128i *)(above + 16 + i++));
y0 = _mm_avg_epu8(x0, x1);
y1 = _mm_avg_epu8(x1, x2);
u0 = avg3_epu8(&x0, &x1, &x2);
u1 = avg3_epu8(&x1, &x2, &x3);
z0 = _mm_avg_epu8(a0, a1);
z1 = _mm_avg_epu8(a1, a2);
v0 = avg3_epu8(&a0, &a1, &a2);
v1 = avg3_epu8(&a1, &a2, &a3);
D63E_STORE_32X4;
x0 = _mm_loadu_si128((const __m128i *)(above + i));
a0 = _mm_loadu_si128((const __m128i *)(above + 16 + i++));
x1 = _mm_loadu_si128((const __m128i *)(above + i));
a1 = _mm_loadu_si128((const __m128i *)(above + 16 + i++));
y0 = _mm_avg_epu8(x2, x3);
y1 = _mm_avg_epu8(x3, x0);
u0 = avg3_epu8(&x2, &x3, &x0);
u1 = avg3_epu8(&x3, &x0, &x1);
z0 = _mm_avg_epu8(a2, a3);
z1 = _mm_avg_epu8(a3, a0);
v0 = avg3_epu8(&a2, &a3, &a0);
v1 = avg3_epu8(&a3, &a0, &a1);
D63E_STORE_32X4;
} while (i < count);
}
void aom_d63e_predictor_32x16_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
d63e_w32(above, dst, stride, 16);
}
void aom_d63e_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
d63e_w32(above, dst, stride, 32);
}
@@ -883,3 +883,370 @@ void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
dst += stride << 3;
smooth_pred_32x8(pixels, &wh[6], ww, dst, stride, 3);
}
// -----------------------------------------------------------------------------
// D45E_PRED
/*
; ------------------------------------------
; input: x, y, z, result
;
; trick from pascal
; (x+2y+z+2)>>2 can be calculated as:
; result = avg(x,z)
; result -= xor(x,z) & 1
; result = avg(result,y)
; ------------------------------------------
*/
static INLINE __m128i avg3_epu8(const __m128i *x, const __m128i *y,
const __m128i *z) {
const __m128i one = _mm_set1_epi8(1);
const __m128i a = _mm_avg_epu8(*x, *z);
__m128i b = _mm_sub_epi8(a, _mm_and_si128(_mm_xor_si128(*x, *z), one));
return _mm_avg_epu8(b, *y);
}
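// D45E predicts every row from the above row only: row r holds the 3-tap
// averages ((a + 2b + c + 2) >> 2) of above[r + c], above[r + c + 1] and
// above[r + c + 2], with the shuffle masks clamping the third tap at the
// right edge; e.g. the 0x07070605 mask below yields
// above[5], above[6], above[7], above[7] for the last row of the 4x4 block.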
void aom_d45e_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
const __m128i x0 = _mm_loadl_epi64((const __m128i *)above);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
const __m128i x4 = _mm_loadl_epi64((const __m128i *)(above + 4));
const __m128i mask = _mm_set1_epi32(0x7070605);
const __m128i x5 = _mm_shuffle_epi8(x0, mask);
const __m128i y0 = avg3_epu8(&x0, &x1, &x2);
const __m128i y1 = avg3_epu8(&x1, &x2, &x3);
const __m128i y2 = avg3_epu8(&x2, &x3, &x4);
const __m128i y3 = avg3_epu8(&x3, &x4, &x5);
*(uint32_t *)dst = _mm_cvtsi128_si32(y0);
dst += stride;
*(uint32_t *)dst = _mm_cvtsi128_si32(y1);
dst += stride;
*(uint32_t *)dst = _mm_cvtsi128_si32(y2);
dst += stride;
*(uint32_t *)dst = _mm_cvtsi128_si32(y3);
}
static INLINE void d45e_w4(const __m128i *a0, const __m128i *a1,
const __m128i *a2, uint8_t **dst, ptrdiff_t stride) {
const __m128i y = avg3_epu8(a0, a1, a2);
*(uint32_t *)*dst = _mm_cvtsi128_si32(y);
*dst += stride;
}
void aom_d45e_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
const uint8_t *above, const uint8_t *left) {
(void)left;
const __m128i v = _mm_load_si128((const __m128i *)above);
__m128i x1 = _mm_alignr_epi8(v, v, 1);
__m128i x2 = _mm_alignr_epi8(v, v, 2);
d45e_w4(&v, &x1, &x2, &dst, stride);
int i = 3;
__m128i x3;
do {
x3 = _mm_loadu_si128((const __m128i *)(above + i++));
d45e_w4(&x1, &x2, &x3, &dst, stride);
x1 = _mm_loadu_si128((const __m128i *)(above + i++));
d45e_w4(&x2, &x3, &x1, &dst, stride);
x2 = _mm_loadu_si128((const __m128i *)(above + i++));
d45e_w4(&x3, &x1, &x2, &dst, stride);
} while (i < 9);
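// Last row: rotate the original 16-byte load so that byte 0 is above[9]; the
// 0x02020100 shuffle then yields above[9], above[10], above[11], above[11],
// duplicating above[11] for the last column's third tap instead of reading
// further to the right.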
x3 = _mm_alignr_epi8(v, v, 9);
const __m128i mask = _mm_set1_epi32(0x2020100);
x3 = _mm_shuffle_epi8(x3, mask);
d45e_w4(&x1, &x2, &x3, &dst, stride);
}
static INLINE void d45e_w8(const __m128i *a0, const __m128i *a1,
const __m128i *a2, uint8_t **dst, ptrdiff_t stride) {
const __m128i y = avg3_epu8(a0, a1, a2);
_mm_storel_epi64((__m128i *)*dst, y);
*dst += stride;
}