Commit 0b7127b3 authored by Yi Luo

Highbd D207E/D63E intrapred sse2/avx2 optimization

D207E
Predictor SSE2 vs C   AVX2 vs C
4x4       ~2.7x
4x8       ~3.0x
8x4       ~7.2x
8x8       ~8.5x
8x16      ~9.4x
16x8      ~12.8x
16x16     ~13.0x
16x32     ~14.3x
32x16                 ~19.9x
32x32                 ~23.6x

D63E
Predictor SSE2 vs C   AVX2 vs C
4x4       ~3.8x
4x8       ~4.3x
8x4       ~6.4x
8x8       ~6.8x
8x16      ~8.6x
16x8                  ~9.0x
16x16                 ~9.6x
16x32                 ~10.3x
32x16                 ~9.1x
32x32                 ~11.0x

Change-Id: I87373804c9d53276bf4d7788c4ae0d13d01c00dc
parent cbce03d2
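
For reference, the D207E/D63E predictors that the new kernels vectorize build every output pixel from either a two-tap rounded average (AVG2) or a three-tap rounded average (AVG3) of neighbouring reference samples. The following is a simplified sketch of the high-bitdepth C behaviour (function names and the reduced argument lists are illustrative, not the in-tree prototypes):

#include <stddef.h>
#include <stdint.h>

#define AVG2(a, b) (((a) + (b) + 1) >> 1)
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

// D207E: built from 'left' only; even columns are AVG2, odd columns AVG3,
// and the sampling window advances by one sample per row.
static void highbd_d207e_sketch(uint16_t *dst, ptrdiff_t stride, int bw,
                                int bh, const uint16_t *left) {
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      dst[c] = (c & 1) ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1],
                              left[(c >> 1) + r + 2])
                       : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]);
    }
    dst += stride;
  }
}

// D63E: built from 'above' only; even rows are AVG2, odd rows AVG3,
// and the window advances by one sample every two rows.
static void highbd_d63e_sketch(uint16_t *dst, ptrdiff_t stride, int bw,
                               int bh, const uint16_t *above) {
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      dst[c] = (r & 1) ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1],
                              above[(r >> 1) + c + 2])
                       : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]);
    }
    dst += stride;
  }
}
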
@@ -266,6 +266,28 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
specialize qw/aom_highbd_d45e_predictor_16x32 avx2/;
specialize qw/aom_highbd_d45e_predictor_32x16 avx2/;
specialize qw/aom_highbd_d45e_predictor_32x32 avx2/;
specialize qw/aom_highbd_d207e_predictor_4x4 sse2/;
specialize qw/aom_highbd_d207e_predictor_4x8 sse2/;
specialize qw/aom_highbd_d207e_predictor_8x4 sse2/;
specialize qw/aom_highbd_d207e_predictor_8x8 sse2/;
specialize qw/aom_highbd_d207e_predictor_8x16 sse2/;
specialize qw/aom_highbd_d207e_predictor_16x8 sse2/;
specialize qw/aom_highbd_d207e_predictor_16x16 sse2/;
specialize qw/aom_highbd_d207e_predictor_16x32 sse2/;
specialize qw/aom_highbd_d207e_predictor_32x16 avx2/;
specialize qw/aom_highbd_d207e_predictor_32x32 avx2/;
specialize qw/aom_highbd_d63e_predictor_4x4 sse2/;
specialize qw/aom_highbd_d63e_predictor_4x8 sse2/;
specialize qw/aom_highbd_d63e_predictor_8x4 sse2/;
specialize qw/aom_highbd_d63e_predictor_8x8 sse2/;
specialize qw/aom_highbd_d63e_predictor_8x16 sse2/;
specialize qw/aom_highbd_d63e_predictor_16x8 avx2/;
specialize qw/aom_highbd_d63e_predictor_16x16 avx2/;
specialize qw/aom_highbd_d63e_predictor_16x32 avx2/;
specialize qw/aom_highbd_d63e_predictor_32x16 avx2/;
specialize qw/aom_highbd_d63e_predictor_32x32 avx2/;
} # CONFIG_HIGHBITDEPTH
#
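
Each specialize line above asks the RTCD generator to emit a dispatch for that predictor. A sketch of the generated aom_dsp_rtcd.h shape for one of these lines, assuming runtime CPU detection (RTCD_EXTERN, HAS_AVX2 and the flag value below follow the usual libaom naming and are assumptions, not copied from the generated file):

#include <stddef.h>
#include <stdint.h>

#define RTCD_EXTERN extern
#define HAS_AVX2 0x80  // placeholder bit; the real value comes from aom_ports/x86.h

// The generator declares the C fallback, the specialized version, and a
// function pointer that setup_rtcd_internal() points at the best available
// implementation for the running CPU.
void aom_highbd_d207e_predictor_32x32_c(uint16_t *dst, ptrdiff_t stride,
                                        const uint16_t *above,
                                        const uint16_t *left, int bd);
void aom_highbd_d207e_predictor_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
                                           const uint16_t *above,
                                           const uint16_t *left, int bd);
RTCD_EXTERN void (*aom_highbd_d207e_predictor_32x32)(uint16_t *dst,
                                                     ptrdiff_t stride,
                                                     const uint16_t *above,
                                                     const uint16_t *left,
                                                     int bd);

static void setup_rtcd_internal(int flags) {
  aom_highbd_d207e_predictor_32x32 = aom_highbd_d207e_predictor_32x32_c;
  if (flags & HAS_AVX2)
    aom_highbd_d207e_predictor_32x32 = aom_highbd_d207e_predictor_32x32_avx2;
}
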
@@ -238,3 +238,254 @@ void aom_highbd_d45e_predictor_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
u = avg3_epu16(&y1, &y2, &y0);
_mm256_storeu_si256((__m256i *)dst2, u);
}
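
The new kernels below reuse the avg3_epu16 helper already defined earlier in this file (it is used by the d45e code in the context above; the SSE2 file has a matching __m128i variant). For completeness, the standard way to get AVG3 = (a + 2*b + c + 2) >> 2 out of the pavgw instruction is sketched here; the in-tree helper should be equivalent, but the exact body is not shown in this diff:

// Sketch of a 256-bit AVG3 helper: _mm256_avg_epu16(x, z) computes
// (x + z + 1) >> 1, so subtract the rounding bias when x + z is odd and
// average the result with y to get (x + 2*y + z + 2) >> 2 exactly.
static INLINE __m256i avg3_epu16_sketch(const __m256i *x, const __m256i *y,
                                        const __m256i *z) {
  const __m256i one = _mm256_set1_epi16(1);
  const __m256i a = _mm256_avg_epu16(*x, *z);
  const __m256i b =
      _mm256_subs_epu16(a, _mm256_and_si256(_mm256_xor_si256(*x, *z), one));
  return _mm256_avg_epu16(b, *y);
}
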
// -----------------------------------------------------------------------------
// D207E_PRED
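// Compute four rows of a 32-wide D207E block from 'left': each row interleaves
// AVG2 and AVG3 of consecutive left samples, advancing by one sample per row.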
static INLINE void d207_32x4(const uint16_t *left, uint16_t **dst,
ptrdiff_t stride) {
const __m256i x0 = _mm256_loadu_si256((const __m256i *)left);
const __m256i x1 = _mm256_loadu_si256((const __m256i *)(left + 1));
const __m256i x2 = _mm256_loadu_si256((const __m256i *)(left + 2));
const __m256i x3 = _mm256_loadu_si256((const __m256i *)(left + 3));
const __m256i x4 = _mm256_loadu_si256((const __m256i *)(left + 4));
const __m256i x5 = _mm256_loadu_si256((const __m256i *)(left + 5));
const __m256i y0 = _mm256_avg_epu16(x0, x1);
const __m256i y1 = _mm256_avg_epu16(x1, x2);
const __m256i y2 = _mm256_avg_epu16(x2, x3);
const __m256i y3 = _mm256_avg_epu16(x3, x4);
const __m256i u0 = avg3_epu16(&x0, &x1, &x2);
const __m256i u1 = avg3_epu16(&x1, &x2, &x3);
const __m256i u2 = avg3_epu16(&x2, &x3, &x4);
const __m256i u3 = avg3_epu16(&x3, &x4, &x5);
__m256i v0 = _mm256_unpacklo_epi16(y0, u0);
__m256i v1 = _mm256_unpackhi_epi16(y0, u0);
_mm256_storeu_si256((__m256i *)*dst, _mm256_permute2x128_si256(v0, v1, 0x20));
_mm256_storeu_si256((__m256i *)(*dst + 16),
_mm256_permute2x128_si256(v0, v1, 0x31));
*dst += stride;
v0 = _mm256_unpacklo_epi16(y1, u1);
v1 = _mm256_unpackhi_epi16(y1, u1);
_mm256_storeu_si256((__m256i *)*dst, _mm256_permute2x128_si256(v0, v1, 0x20));
_mm256_storeu_si256((__m256i *)(*dst + 16),
_mm256_permute2x128_si256(v0, v1, 0x31));
*dst += stride;
v0 = _mm256_unpacklo_epi16(y2, u2);
v1 = _mm256_unpackhi_epi16(y2, u2);
_mm256_storeu_si256((__m256i *)*dst, _mm256_permute2x128_si256(v0, v1, 0x20));
_mm256_storeu_si256((__m256i *)(*dst + 16),
_mm256_permute2x128_si256(v0, v1, 0x31));
*dst += stride;
v0 = _mm256_unpacklo_epi16(y3, u3);
v1 = _mm256_unpackhi_epi16(y3, u3);
_mm256_storeu_si256((__m256i *)*dst, _mm256_permute2x128_si256(v0, v1, 0x20));
_mm256_storeu_si256((__m256i *)(*dst + 16),
_mm256_permute2x128_si256(v0, v1, 0x31));
*dst += stride;
}
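// Note on the interleave in d207_32x4 above: _mm256_unpacklo/unpackhi_epi16
// operate within each 128-bit lane, so v0 holds columns 0-7 and 16-23 while
// v1 holds columns 8-15 and 24-31; the permute2x128 calls with 0x20 / 0x31
// restore the linear 0-15 / 16-31 order before the two 16-pixel stores.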
void aom_highbd_d207e_predictor_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
int i;
for (i = 0; i < 16; i += 4) {
d207_32x4(left + i, &dst, stride);
}
}
void aom_highbd_d207e_predictor_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
int i;
for (i = 0; i < 32; i += 4) {
d207_32x4(left + i, &dst, stride);
}
}
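// Store one group of four 16-wide D63E rows: an AVG2 row, an AVG3 row, then
// the same pair shifted one sample along 'above'.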
#define D63E_STORE_16X4 \
do { \
_mm256_storeu_si256((__m256i *)dst, y0); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, u0); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, y1); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, u1); \
dst += stride; \
} while (0)
void aom_highbd_d63e_predictor_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
__m256i x0 = _mm256_loadu_si256((const __m256i *)above);
__m256i x1 = _mm256_loadu_si256((const __m256i *)(above + 1));
const __m256i x2 = _mm256_loadu_si256((const __m256i *)(above + 2));
const __m256i x3 = _mm256_loadu_si256((const __m256i *)(above + 3));
__m256i y0 = _mm256_avg_epu16(x0, x1);
__m256i y1 = _mm256_avg_epu16(x1, x2);
__m256i u0 = avg3_epu16(&x0, &x1, &x2);
__m256i u1 = avg3_epu16(&x1, &x2, &x3);
D63E_STORE_16X4;
x0 = _mm256_loadu_si256((const __m256i *)(above + 4));
x1 = _mm256_loadu_si256((const __m256i *)(above + 5));
y0 = _mm256_avg_epu16(x2, x3);
y1 = _mm256_avg_epu16(x3, x0);
u0 = avg3_epu16(&x2, &x3, &x0);
u1 = avg3_epu16(&x3, &x0, &x1);
D63E_STORE_16X4;
}
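// D63E for 16-wide blocks: emit rows in AVG2/AVG3 pairs, advancing the
// 'above' window by one sample every two rows; 'num' is the block height.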
static INLINE void d63e_w16(const uint16_t *above, uint16_t *dst,
ptrdiff_t stride, int num) {
__m256i x0, x1, x2, x3;
__m256i y0, y1, u0, u1;
const int count = (num >> 1) + 2;
x0 = _mm256_loadu_si256((const __m256i *)above);
x1 = _mm256_loadu_si256((const __m256i *)(above + 1));
int i = 2;
do {
x2 = _mm256_loadu_si256((const __m256i *)(above + i++));
x3 = _mm256_loadu_si256((const __m256i *)(above + i++));
y0 = _mm256_avg_epu16(x0, x1);
y1 = _mm256_avg_epu16(x1, x2);
u0 = avg3_epu16(&x0, &x1, &x2);
u1 = avg3_epu16(&x1, &x2, &x3);
D63E_STORE_16X4;
x0 = _mm256_loadu_si256((const __m256i *)(above + i++));
x1 = _mm256_loadu_si256((const __m256i *)(above + i++));
y0 = _mm256_avg_epu16(x2, x3);
y1 = _mm256_avg_epu16(x3, x0);
u0 = avg3_epu16(&x2, &x3, &x0);
u1 = avg3_epu16(&x3, &x0, &x1);
D63E_STORE_16X4;
} while (i < count);
}
void aom_highbd_d63e_predictor_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
d63e_w16(above, dst, stride, 16);
}
void aom_highbd_d63e_predictor_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
d63e_w16(above, dst, stride, 32);
}
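// As D63E_STORE_16X4, but each row also stores the upper 16 columns (z*/v*).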
#define D63E_STORE_32X4 \
do { \
_mm256_storeu_si256((__m256i *)dst, y0); \
_mm256_storeu_si256((__m256i *)(dst + 16), z0); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, u0); \
_mm256_storeu_si256((__m256i *)(dst + 16), v0); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, y1); \
_mm256_storeu_si256((__m256i *)(dst + 16), z1); \
dst += stride; \
_mm256_storeu_si256((__m256i *)dst, u1); \
_mm256_storeu_si256((__m256i *)(dst + 16), v1); \
dst += stride; \
} while (0)
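// Same as d63e_w16, but 32 wide: the a0..a3 / z* / v* registers carry
// columns 16-31 of each row.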
static INLINE void d63e_w32(const uint16_t *above, uint16_t *dst,
ptrdiff_t stride, int num) {
__m256i x0, x1, x2, x3, a0, a1, a2, a3;
__m256i y0, y1, u0, u1, z0, z1, v0, v1;
const int count = (num >> 1) + 2;
x0 = _mm256_loadu_si256((const __m256i *)above);
x1 = _mm256_loadu_si256((const __m256i *)(above + 1));
a0 = _mm256_loadu_si256((const __m256i *)(above + 16));
a1 = _mm256_loadu_si256((const __m256i *)(above + 16 + 1));
int i = 2;
do {
x2 = _mm256_loadu_si256((const __m256i *)(above + i));
a2 = _mm256_loadu_si256((const __m256i *)(above + 16 + i++));
x3 = _mm256_loadu_si256((const __m256i *)(above + i));
a3 = _mm256_loadu_si256((const __m256i *)(above + 16 + i++));
y0 = _mm256_avg_epu16(x0, x1);
y1 = _mm256_avg_epu16(x1, x2);
u0 = avg3_epu16(&x0, &x1, &x2);
u1 = avg3_epu16(&x1, &x2, &x3);
z0 = _mm256_avg_epu16(a0, a1);
z1 = _mm256_avg_epu16(a1, a2);
v0 = avg3_epu16(&a0, &a1, &a2);
v1 = avg3_epu16(&a1, &a2, &a3);
D63E_STORE_32X4;
x0 = _mm256_loadu_si256((const __m256i *)(above + i));
a0 = _mm256_loadu_si256((const __m256i *)(above + 16 + i++));
x1 = _mm256_loadu_si256((const __m256i *)(above + i));
a1 = _mm256_loadu_si256((const __m256i *)(above + 16 + i++));
y0 = _mm256_avg_epu16(x2, x3);
y1 = _mm256_avg_epu16(x3, x0);
u0 = avg3_epu16(&x2, &x3, &x0);
u1 = avg3_epu16(&x3, &x0, &x1);
z0 = _mm256_avg_epu16(a2, a3);
z1 = _mm256_avg_epu16(a3, a0);
v0 = avg3_epu16(&a2, &a3, &a0);
v1 = avg3_epu16(&a3, &a0, &a1);
D63E_STORE_32X4;
} while (i < count);
}
void aom_highbd_d63e_predictor_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
d63e_w32(above, dst, stride, 16);
}
void aom_highbd_d63e_predictor_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
d63e_w32(above, dst, stride, 32);
}
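
A minimal way to sanity-check one of the new paths against the C reference (a standalone sketch, not part of the patch; the RTCD header path, the generated _c prototype and the 12-bit test fill are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "./aom_dsp_rtcd.h"  // generated header; the path depends on the build

int main(void) {
  uint16_t above[64], left[64], dst_c[32 * 32], dst_avx2[32 * 32];
  for (int i = 0; i < 64; ++i) {
    above[i] = (i * 29) & 0xfff;  // arbitrary 12-bit samples
    left[i] = (i * 37) & 0xfff;
  }
  aom_highbd_d207e_predictor_32x32_c(dst_c, 32, above, left, 12);
  aom_highbd_d207e_predictor_32x32_avx2(dst_avx2, 32, above, left, 12);
  printf("%s\n", memcmp(dst_c, dst_avx2, sizeof(dst_c)) ? "MISMATCH" : "OK");
  return 0;
}
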
@@ -1254,3 +1254,344 @@ void aom_highbd_d45e_predictor_8x16_sse2(uint16_t *dst, ptrdiff_t stride,
y = avg3_epu16(&x0, &x1, &x2);
_mm_store_si128((__m128i *)dst, y);
}
// -----------------------------------------------------------------------------
// D207E_PRED
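// Four rows of a 4-wide D207E block; the interleaved AVG2/AVG3 results for
// rows 0/2 land in v0 and rows 1/3 in v1.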
static INLINE void d207_4x4(const uint16_t *left, uint16_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadl_epi64((const __m128i *)left);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(left + 3));
const __m128i y0 = _mm_avg_epu16(x0, x1);
const __m128i y1 = _mm_avg_epu16(x1, x2);
const __m128i u0 = avg3_epu16(&x0, &x1, &x2);
const __m128i u1 = avg3_epu16(&x1, &x2, &x3);
const __m128i v0 = _mm_unpacklo_epi16(y0, u0);
const __m128i v1 = _mm_unpacklo_epi16(y1, u1);
_mm_storel_epi64((__m128i *)*dst, v0);
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, v1);
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, _mm_srli_si128(v0, 8));
*dst += stride;
_mm_storel_epi64((__m128i *)*dst, _mm_srli_si128(v1, 8));
*dst += stride;
}
void aom_highbd_d207e_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_4x4(left, &dst, stride);
}
void aom_highbd_d207e_predictor_4x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_4x4(left, &dst, stride);
d207_4x4(left + 4, &dst, stride);
}
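// Four rows of an 8-wide D207E block; each unpacklo pairs the AVG2 and AVG3
// halves of one row.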
static INLINE void d207_8x4(const uint16_t *left, uint16_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadl_epi64((const __m128i *)left);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(left + 3));
const __m128i x4 = _mm_loadl_epi64((const __m128i *)(left + 4));
const __m128i x5 = _mm_loadl_epi64((const __m128i *)(left + 5));
const __m128i y0 = _mm_avg_epu16(x0, x1);
const __m128i y1 = _mm_avg_epu16(x1, x2);
const __m128i y2 = _mm_avg_epu16(x2, x3);
const __m128i y3 = _mm_avg_epu16(x3, x4);
const __m128i u0 = avg3_epu16(&x0, &x1, &x2);
const __m128i u1 = avg3_epu16(&x1, &x2, &x3);
const __m128i u2 = avg3_epu16(&x2, &x3, &x4);
const __m128i u3 = avg3_epu16(&x3, &x4, &x5);
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y0, u0));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y1, u1));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y2, u2));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y3, u3));
*dst += stride;
}
void aom_highbd_d207e_predictor_8x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_8x4(left, &dst, stride);
}
void aom_highbd_d207e_predictor_8x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_8x4(left, &dst, stride);
d207_8x4(left + 4, &dst, stride);
}
void aom_highbd_d207e_predictor_8x16_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_8x4(left, &dst, stride);
d207_8x4(left + 4, &dst, stride);
d207_8x4(left + 8, &dst, stride);
d207_8x4(left + 12, &dst, stride);
}
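// Four rows of a 16-wide D207E block; unpacklo/unpackhi split each row's
// interleaved AVG2/AVG3 results into its low and high 8 columns.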
static INLINE void d207_16x4(const uint16_t *left, uint16_t **dst,
ptrdiff_t stride) {
const __m128i x0 = _mm_loadu_si128((const __m128i *)left);
const __m128i x1 = _mm_loadu_si128((const __m128i *)(left + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(left + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(left + 3));
const __m128i x4 = _mm_loadu_si128((const __m128i *)(left + 4));
const __m128i x5 = _mm_loadu_si128((const __m128i *)(left + 5));
const __m128i y0 = _mm_avg_epu16(x0, x1);
const __m128i y1 = _mm_avg_epu16(x1, x2);
const __m128i y2 = _mm_avg_epu16(x2, x3);
const __m128i y3 = _mm_avg_epu16(x3, x4);
const __m128i u0 = avg3_epu16(&x0, &x1, &x2);
const __m128i u1 = avg3_epu16(&x1, &x2, &x3);
const __m128i u2 = avg3_epu16(&x2, &x3, &x4);
const __m128i u3 = avg3_epu16(&x3, &x4, &x5);
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y0, u0));
_mm_store_si128((__m128i *)(*dst + 8), _mm_unpackhi_epi16(y0, u0));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y1, u1));
_mm_store_si128((__m128i *)(*dst + 8), _mm_unpackhi_epi16(y1, u1));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y2, u2));
_mm_store_si128((__m128i *)(*dst + 8), _mm_unpackhi_epi16(y2, u2));
*dst += stride;
_mm_store_si128((__m128i *)*dst, _mm_unpacklo_epi16(y3, u3));
_mm_store_si128((__m128i *)(*dst + 8), _mm_unpackhi_epi16(y3, u3));
*dst += stride;
}
void aom_highbd_d207e_predictor_16x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_16x4(left, &dst, stride);
d207_16x4(left + 4, &dst, stride);
}
void aom_highbd_d207e_predictor_16x16_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
d207_16x4(left, &dst, stride);
d207_16x4(left + 4, &dst, stride);
d207_16x4(left + 8, &dst, stride);
d207_16x4(left + 12, &dst, stride);
}
void aom_highbd_d207e_predictor_16x32_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)above;
(void)bd;
int i;
for (i = 0; i < 32; i += 4) {
d207_16x4(left + i, &dst, stride);
}
}
// -----------------------------------------------------------------------------
// D63E_PRED
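// D63E rows alternate AVG2 (even rows) and AVG3 (odd rows) of 'above', with
// the sampling window advancing by one sample every two rows.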
void aom_highbd_d63e_predictor_4x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
const __m128i x0 = _mm_loadl_epi64((const __m128i *)above);
const __m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
const __m128i y0 = _mm_avg_epu16(x0, x1);
const __m128i y1 = _mm_avg_epu16(x1, x2);
const __m128i u0 = avg3_epu16(&x0, &x1, &x2);
const __m128i u1 = avg3_epu16(&x1, &x2, &x3);
_mm_storel_epi64((__m128i *)dst, y0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, y1);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u1);
}
void aom_highbd_d63e_predictor_4x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
__m128i x0 = _mm_loadl_epi64((const __m128i *)above);
__m128i x1 = _mm_loadl_epi64((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadl_epi64((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadl_epi64((const __m128i *)(above + 3));
__m128i y0 = _mm_avg_epu16(x0, x1);
__m128i y1 = _mm_avg_epu16(x1, x2);
__m128i u0 = avg3_epu16(&x0, &x1, &x2);
__m128i u1 = avg3_epu16(&x1, &x2, &x3);
_mm_storel_epi64((__m128i *)dst, y0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, y1);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u1);
dst += stride;
x0 = _mm_loadl_epi64((const __m128i *)(above + 4));
x1 = _mm_loadl_epi64((const __m128i *)(above + 5));
y0 = _mm_avg_epu16(x2, x3);
y1 = _mm_avg_epu16(x3, x0);
u0 = avg3_epu16(&x2, &x3, &x0);
u1 = avg3_epu16(&x3, &x0, &x1);
_mm_storel_epi64((__m128i *)dst, y0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u0);
dst += stride;
_mm_storel_epi64((__m128i *)dst, y1);
dst += stride;
_mm_storel_epi64((__m128i *)dst, u1);
}
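// Store one group of four 8-wide D63E rows (AVG2, AVG3, then the same pair
// shifted by one sample).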
#define D63E_STORE_8X4 \
do { \
_mm_store_si128((__m128i *)dst, y0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u0); \
dst += stride; \
_mm_store_si128((__m128i *)dst, y1); \
dst += stride; \
_mm_store_si128((__m128i *)dst, u1); \
dst += stride; \
} while (0)
void aom_highbd_d63e_predictor_8x4_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
const __m128i x0 = _mm_load_si128((const __m128i *)above);
const __m128i x1 = _mm_loadu_si128((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(above + 3));
const __m128i y0 = _mm_avg_epu16(x0, x1);
const __m128i y1 = _mm_avg_epu16(x1, x2);
const __m128i u0 = avg3_epu16(&x0, &x1, &x2);
const __m128i u1 = avg3_epu16(&x1, &x2, &x3);
_mm_store_si128((__m128i *)dst, y0);
dst += stride;
_mm_store_si128((__m128i *)dst, u0);
dst += stride;
_mm_store_si128((__m128i *)dst, y1);
dst += stride;
_mm_store_si128((__m128i *)dst, u1);
}
void aom_highbd_d63e_predictor_8x8_sse2(uint16_t *dst, ptrdiff_t stride,
const uint16_t *above,
const uint16_t *left, int bd) {
(void)left;
(void)bd;
__m128i x0 = _mm_load_si128((const __m128i *)above);
__m128i x1 = _mm_loadu_si128((const __m128i *)(above + 1));
const __m128i x2 = _mm_loadu_si128((const __m128i *)(above + 2));
const __m128i x3 = _mm_loadu_si128((const __m128i *)(above + 3));
__m128i y0 = _mm_avg_epu16(x0, x1);
__m128i y1 = _mm_avg_epu16(x1, x2);
__m128i u0 = avg3_epu16(&x0, &x1, &x2);
__m128i u1 = avg3_epu16(&x1, &x2, &x3);
D63E_STORE_8X4;