Commit a5f8ea11 authored by Steinar Midtskogen, committed by Yaowu Xu

Added generic SIMD library supporting x86 SSE2+ and ARM NEON.

Change-Id: I037f4c44f621a7e909b82ccb6a299d41bcbf8607
parent d06588ab
/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#ifndef _AOM_SIMD_H
#define _AOM_SIMD_H
#ifndef SIMD_INLINE
#ifdef __GNUC__
#define SIMD_INLINE static inline __attribute__((always_inline))
#elif __STDC_VERSION__ >= 199901L
#define SIMD_INLINE static inline
#elif defined(_MSC_VER)
#define SIMD_INLINE static __inline
#else
#define SIMD_INLINE static
#endif
#endif
#include <stdint.h>
#if defined(_WIN32)
#include <intrin.h>
#endif
#include "./aom_config.h"
#if HAVE_NEON
#include "simd/v128_intrinsics_arm.h"
#elif HAVE_SSE2
#include "simd/v128_intrinsics_x86.h"
#else
#include "simd/v128_intrinsics.h"
#endif
#endif /* _AOM_SIMD_H */
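For orientation (not part of the commit itself), a minimal sketch of how client code might use this abstraction layer; the function name and the 16-byte alignment assumption are illustrative only:

#include "aom_simd.h"

/* Illustrative sketch: average two 16-byte-aligned rows of 16 pixels.
   The same source compiles against the NEON, SSE2, or plain-C backend,
   depending on which v128 intrinsics header aom_simd.h selects. */
static void average_row16(uint8_t *dst, const uint8_t *a, const uint8_t *b) {
  v128 x = v128_load_aligned(a);
  v128 y = v128_load_aligned(b);
  v128_store_aligned(dst, v128_avg_u8(x, y));
}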
/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#ifndef _V128_INTRINSICS_H
#define _V128_INTRINSICS_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "./v128_intrinsics_c.h"
#include "./v64_intrinsics.h"
/* Fallback to plain, unoptimised C. */
typedef c_v128 v128;
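/* Note: besides being a fallback, the plain-C implementation serves as a
   readable reference for the semantics of each operation; the optimised
   backends are expected to produce bit-identical results. */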
SIMD_INLINE uint32_t v128_low_u32(v128 a) { return c_v128_low_u32(a); }
SIMD_INLINE v64 v128_low_v64(v128 a) { return c_v128_low_v64(a); }
SIMD_INLINE v64 v128_high_v64(v128 a) { return c_v128_high_v64(a); }
SIMD_INLINE v128 v128_from_64(uint64_t hi, uint64_t lo) {
  return c_v128_from_64(hi, lo);
}
SIMD_INLINE v128 v128_from_v64(v64 hi, v64 lo) {
  return c_v128_from_v64(hi, lo);
}
SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return c_v128_from_32(a, b, c, d);
}
SIMD_INLINE v128 v128_load_unaligned(const void *p) {
  return c_v128_load_unaligned(p);
}
SIMD_INLINE v128 v128_load_aligned(const void *p) {
  return c_v128_load_aligned(p);
}
SIMD_INLINE void v128_store_unaligned(void *p, v128 a) {
  c_v128_store_unaligned(p, a);
}
SIMD_INLINE void v128_store_aligned(void *p, v128 a) {
  c_v128_store_aligned(p, a);
}
SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
  return c_v128_align(a, b, c);
}
SIMD_INLINE v128 v128_zero() { return c_v128_zero(); }
SIMD_INLINE v128 v128_dup_8(uint8_t x) { return c_v128_dup_8(x); }
SIMD_INLINE v128 v128_dup_16(uint16_t x) { return c_v128_dup_16(x); }
SIMD_INLINE v128 v128_dup_32(uint32_t x) { return c_v128_dup_32(x); }
typedef uint32_t sad128_internal;
SIMD_INLINE sad128_internal v128_sad_u8_init() { return c_v128_sad_u8_init(); }
SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
  return c_v128_sad_u8(s, a, b);
}
SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
  return c_v128_sad_u8_sum(s);
}
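/* Usage sketch (illustrative, not part of the API): the SAD accumulator
   follows an init/accumulate/finalise pattern, e.g.
     sad128_internal s = v128_sad_u8_init();
     s = v128_sad_u8(s, v128_load_aligned(p), v128_load_aligned(q));
     uint32_t sad = v128_sad_u8_sum(s);
   The SSD accumulator below is used the same way. */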
typedef uint32_t ssd128_internal;
SIMD_INLINE ssd128_internal v128_ssd_u8_init() { return c_v128_ssd_u8_init(); }
SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
  return c_v128_ssd_u8(s, a, b);
}
SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
  return c_v128_ssd_u8_sum(s);
}
SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
  return c_v128_dotp_s16(a, b);
}
SIMD_INLINE uint64_t v128_hadd_u8(v128 a) { return c_v128_hadd_u8(a); }
SIMD_INLINE v128 v128_or(v128 a, v128 b) { return c_v128_or(a, b); }
SIMD_INLINE v128 v128_xor(v128 a, v128 b) { return c_v128_xor(a, b); }
SIMD_INLINE v128 v128_and(v128 a, v128 b) { return c_v128_and(a, b); }
SIMD_INLINE v128 v128_andn(v128 a, v128 b) { return c_v128_andn(a, b); }
SIMD_INLINE v128 v128_add_8(v128 a, v128 b) { return c_v128_add_8(a, b); }
SIMD_INLINE v128 v128_add_16(v128 a, v128 b) { return c_v128_add_16(a, b); }
SIMD_INLINE v128 v128_sadd_s16(v128 a, v128 b) { return c_v128_sadd_s16(a, b); }
SIMD_INLINE v128 v128_add_32(v128 a, v128 b) { return c_v128_add_32(a, b); }
SIMD_INLINE v128 v128_padd_s16(v128 a) { return c_v128_padd_s16(a); }
SIMD_INLINE v128 v128_sub_8(v128 a, v128 b) { return c_v128_sub_8(a, b); }
SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return c_v128_ssub_u8(a, b); }
SIMD_INLINE v128 v128_ssub_s8(v128 a, v128 b) { return c_v128_ssub_s8(a, b); }
SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return c_v128_sub_16(a, b); }
SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return c_v128_ssub_s16(a, b); }
SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return c_v128_sub_32(a, b); }
SIMD_INLINE v128 v128_abs_s16(v128 a) { return c_v128_abs_s16(a); }
SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) { return c_v128_mul_s16(a, b); }
SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
  return c_v128_mullo_s16(a, b);
}
SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
  return c_v128_mulhi_s16(a, b);
}
SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
  return c_v128_mullo_s32(a, b);
}
SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) { return c_v128_madd_s16(a, b); }
SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) { return c_v128_madd_us8(a, b); }
SIMD_INLINE v128 v128_avg_u8(v128 a, v128 b) { return c_v128_avg_u8(a, b); }
SIMD_INLINE v128 v128_rdavg_u8(v128 a, v128 b) { return c_v128_rdavg_u8(a, b); }
SIMD_INLINE v128 v128_avg_u16(v128 a, v128 b) { return c_v128_avg_u16(a, b); }
SIMD_INLINE v128 v128_min_u8(v128 a, v128 b) { return c_v128_min_u8(a, b); }
SIMD_INLINE v128 v128_max_u8(v128 a, v128 b) { return c_v128_max_u8(a, b); }
SIMD_INLINE v128 v128_min_s8(v128 a, v128 b) { return c_v128_min_s8(a, b); }
SIMD_INLINE v128 v128_max_s8(v128 a, v128 b) { return c_v128_max_s8(a, b); }
SIMD_INLINE v128 v128_min_s16(v128 a, v128 b) { return c_v128_min_s16(a, b); }
SIMD_INLINE v128 v128_max_s16(v128 a, v128 b) { return c_v128_max_s16(a, b); }
SIMD_INLINE v128 v128_ziplo_8(v128 a, v128 b) { return c_v128_ziplo_8(a, b); }
SIMD_INLINE v128 v128_ziphi_8(v128 a, v128 b) { return c_v128_ziphi_8(a, b); }
SIMD_INLINE v128 v128_ziplo_16(v128 a, v128 b) { return c_v128_ziplo_16(a, b); }
SIMD_INLINE v128 v128_ziphi_16(v128 a, v128 b) { return c_v128_ziphi_16(a, b); }
SIMD_INLINE v128 v128_ziplo_32(v128 a, v128 b) { return c_v128_ziplo_32(a, b); }
SIMD_INLINE v128 v128_ziphi_32(v128 a, v128 b) { return c_v128_ziphi_32(a, b); }
SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) { return c_v128_ziplo_64(a, b); }
SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) { return c_v128_ziphi_64(a, b); }
SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return c_v128_zip_8(a, b); }
SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return c_v128_zip_16(a, b); }
SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return c_v128_zip_32(a, b); }
SIMD_INLINE v128 v128_unziplo_8(v128 a, v128 b) {
  return c_v128_unziplo_8(a, b);
}
SIMD_INLINE v128 v128_unziphi_8(v128 a, v128 b) {
  return c_v128_unziphi_8(a, b);
}
SIMD_INLINE v128 v128_unziplo_16(v128 a, v128 b) {
  return c_v128_unziplo_16(a, b);
}
SIMD_INLINE v128 v128_unziphi_16(v128 a, v128 b) {
  return c_v128_unziphi_16(a, b);
}
SIMD_INLINE v128 v128_unziplo_32(v128 a, v128 b) {
  return c_v128_unziplo_32(a, b);
}
SIMD_INLINE v128 v128_unziphi_32(v128 a, v128 b) {
  return c_v128_unziphi_32(a, b);
}
SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) { return c_v128_unpack_u8_s16(a); }
SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
  return c_v128_unpacklo_u8_s16(a);
}
SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
  return c_v128_unpackhi_u8_s16(a);
}
SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
  return c_v128_pack_s32_s16(a, b);
}
SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
  return c_v128_pack_s16_u8(a, b);
}
SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
  return c_v128_pack_s16_s8(a, b);
}
SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) { return c_v128_unpack_u16_s32(a); }
SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) { return c_v128_unpack_s16_s32(a); }
SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
  return c_v128_unpacklo_u16_s32(a);
}
SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
  return c_v128_unpacklo_s16_s32(a);
}
SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
  return c_v128_unpackhi_u16_s32(a);
}
SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
  return c_v128_unpackhi_s16_s32(a);
}
SIMD_INLINE v128 v128_shuffle_8(v128 a, v128 pattern) {
  return c_v128_shuffle_8(a, pattern);
}
SIMD_INLINE v128 v128_cmpgt_s8(v128 a, v128 b) { return c_v128_cmpgt_s8(a, b); }
SIMD_INLINE v128 v128_cmplt_s8(v128 a, v128 b) { return c_v128_cmplt_s8(a, b); }
SIMD_INLINE v128 v128_cmpeq_8(v128 a, v128 b) { return c_v128_cmpeq_8(a, b); }
SIMD_INLINE v128 v128_cmpgt_s16(v128 a, v128 b) {
  return c_v128_cmpgt_s16(a, b);
}
SIMD_INLINE v128 v128_cmplt_s16(v128 a, v128 b) {
  return c_v128_cmplt_s16(a, b);
}
SIMD_INLINE v128 v128_cmpeq_16(v128 a, v128 b) { return c_v128_cmpeq_16(a, b); }
SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
  return c_v128_shl_8(a, c);
}
SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
  return c_v128_shr_u8(a, c);
}
SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
  return c_v128_shr_s8(a, c);
}
SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
  return c_v128_shl_16(a, c);
}
SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
  return c_v128_shr_u16(a, c);
}
SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
  return c_v128_shr_s16(a, c);
}
SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
  return c_v128_shl_32(a, c);
}
SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
  return c_v128_shr_u32(a, c);
}
SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
  return c_v128_shr_s32(a, c);
}
SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
  return c_v128_shr_n_byte(a, n);
}
SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
  return c_v128_shl_n_byte(a, n);
}
SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int n) {
  return c_v128_shl_n_8(a, n);
}
SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int n) {
  return c_v128_shl_n_16(a, n);
}
SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int n) {
  return c_v128_shl_n_32(a, n);
}
SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int n) {
  return c_v128_shr_n_u8(a, n);
}
SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int n) {
  return c_v128_shr_n_u16(a, n);
}
SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int n) {
  return c_v128_shr_n_u32(a, n);
}
SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int n) {
  return c_v128_shr_n_s8(a, n);
}
SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int n) {
  return c_v128_shr_n_s16(a, n);
}
SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int n) {
  return c_v128_shr_n_s32(a, n);
}
#endif /* _V128_INTRINSICS_H */
/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#ifndef _V128_INTRINSICS_H
#define _V128_INTRINSICS_H
#include "./v64_intrinsics_x86.h"
typedef __m128i v128;
SIMD_INLINE uint32_t v128_low_u32(v128 a) {
  return (uint32_t)_mm_cvtsi128_si32(a);
}
SIMD_INLINE v64 v128_low_v64(v128 a) {
  return _mm_unpacklo_epi64(a, v64_zero());
}
SIMD_INLINE v64 v128_high_v64(v128 a) { return _mm_srli_si128(a, 8); }
SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) {
  return _mm_unpacklo_epi64(b, a);
}
SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
  return v128_from_v64(v64_from_64(a), v64_from_64(b));
}
SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return _mm_set_epi32(a, b, c, d);
}
SIMD_INLINE v128 v128_load_aligned(const void *p) {
  return _mm_load_si128((__m128i *)p);
}
SIMD_INLINE v128 v128_load_unaligned(const void *p) {
#if defined(__SSSE3__)
  return (__m128i)_mm_lddqu_si128((__m128i *)p);
#else
  return _mm_loadu_si128((__m128i *)p);
#endif
}
SIMD_INLINE void v128_store_aligned(void *p, v128 a) {
  _mm_store_si128((__m128i *)p, a);
}
SIMD_INLINE void v128_store_unaligned(void *p, v128 a) {
  _mm_storeu_si128((__m128i *)p, a);
}
#if defined(__OPTIMIZE__)
#if defined(__SSSE3__)
SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
  return c ? _mm_alignr_epi8(a, b, c) : b;
}
#else
#define v128_align(a, b, c) \
  ((c) ? _mm_or_si128(_mm_srli_si128(b, c), _mm_slli_si128(a, 16 - (c))) : (b))
#endif
#else
#if defined(__SSSE3__)
#define v128_align(a, b, c) ((c) ? _mm_alignr_epi8(a, b, c) : (b))
#else
#define v128_align(a, b, c) \
  ((c) ? _mm_or_si128(_mm_srli_si128(b, c), _mm_slli_si128(a, 16 - (c))) : (b))
#endif
#endif
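/* Note on the fallback above: without SSSE3's palignr, the concatenated
   byte shift is emulated as (b >> c bytes) | (a << (16 - c) bytes). Macro
   forms are used because _mm_srli_si128, _mm_slli_si128 and
   _mm_alignr_epi8 require compile-time-constant shift counts, which a
   function argument only becomes once the call is inlined under
   optimisation. */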
SIMD_INLINE v128 v128_zero() { return _mm_setzero_si128(); }
SIMD_INLINE v128 v128_dup_8(uint8_t x) { return _mm_set1_epi8(x); }
SIMD_INLINE v128 v128_dup_16(uint16_t x) { return _mm_set1_epi16(x); }
SIMD_INLINE v128 v128_dup_32(uint32_t x) { return _mm_set1_epi32(x); }
SIMD_INLINE v128 v128_add_8(v128 a, v128 b) { return _mm_add_epi8(a, b); }
SIMD_INLINE v128 v128_add_16(v128 a, v128 b) { return _mm_add_epi16(a, b); }
SIMD_INLINE v128 v128_sadd_s16(v128 a, v128 b) { return _mm_adds_epi16(a, b); }
SIMD_INLINE v128 v128_add_32(v128 a, v128 b) { return _mm_add_epi32(a, b); }
SIMD_INLINE v128 v128_padd_s16(v128 a) {
  return _mm_madd_epi16(a, _mm_set1_epi16(1));
}
SIMD_INLINE v128 v128_sub_8(v128 a, v128 b) { return _mm_sub_epi8(a, b); }
SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return _mm_subs_epu8(a, b); }
SIMD_INLINE v128 v128_ssub_s8(v128 a, v128 b) { return _mm_subs_epi8(a, b); }
SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return _mm_sub_epi16(a, b); }
SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return _mm_subs_epi16(a, b); }
SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return _mm_sub_epi32(a, b); }
SIMD_INLINE v128 v128_abs_s16(v128 a) {
#if defined(__SSSE3__)
  return _mm_abs_epi16(a);
#else
  return _mm_max_epi16(a, _mm_sub_epi16(_mm_setzero_si128(), a));
#endif
}
SIMD_INLINE v128 v128_ziplo_8(v128 a, v128 b) {
  return _mm_unpacklo_epi8(b, a);
}
SIMD_INLINE v128 v128_ziphi_8(v128 a, v128 b) {
  return _mm_unpackhi_epi8(b, a);
}
SIMD_INLINE v128 v128_ziplo_16(v128 a, v128 b) {
  return _mm_unpacklo_epi16(b, a);
}
SIMD_INLINE v128 v128_ziphi_16(v128 a, v128 b) {
  return _mm_unpackhi_epi16(b, a);
}
SIMD_INLINE v128 v128_ziplo_32(v128 a, v128 b) {
  return _mm_unpacklo_epi32(b, a);
}
SIMD_INLINE v128 v128_ziphi_32(v128 a, v128 b) {
  return _mm_unpackhi_epi32(b, a);
}
SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
  return _mm_unpacklo_epi64(b, a);
}
SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
  return _mm_unpackhi_epi64(b, a);
}
SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return _mm_unpacklo_epi8(b, a); }
SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return _mm_unpacklo_epi16(b, a); }
SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return _mm_unpacklo_epi32(b, a); }
SIMD_INLINE v128 v128_unziphi_8(v128 a, v128 b) {
  return _mm_packs_epi16(_mm_srai_epi16(b, 8), _mm_srai_epi16(a, 8));
}
SIMD_INLINE v128 v128_unziplo_8(v128 a, v128 b) {
#if defined(__SSSE3__)
  v128 order = _mm_cvtsi64_si128(0x0e0c0a0806040200LL);
  return _mm_unpacklo_epi64(_mm_shuffle_epi8(b, order),
                            _mm_shuffle_epi8(a, order));
#else
  return v128_unziphi_8(_mm_slli_si128(a, 1), _mm_slli_si128(b, 1));
#endif
}
SIMD_INLINE v128 v128_unziphi_16(v128 a, v128 b) {
  return _mm_packs_epi32(_mm_srai_epi32(b, 16), _mm_srai_epi32(a, 16));
}
SIMD_INLINE v128 v128_unziplo_16(v128 a, v128 b) {
#if defined(__SSSE3__)
  v128 order = _mm_cvtsi64_si128(0x0d0c090805040100LL);
  return _mm_unpacklo_epi64(_mm_shuffle_epi8(b, order),
                            _mm_shuffle_epi8(a, order));
#else
  return v128_unziphi_16(_mm_slli_si128(a, 2), _mm_slli_si128(b, 2));
#endif
}
SIMD_INLINE v128 v128_unziphi_32(v128 a, v128 b) {
  return _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(b), _mm_castsi128_ps(a), _MM_SHUFFLE(3, 1, 3, 1)));
}
SIMD_INLINE v128 v128_unziplo_32(v128 a, v128 b) {
  return _mm_castps_si128(_mm_shuffle_ps(
      _mm_castsi128_ps(b), _mm_castsi128_ps(a), _MM_SHUFFLE(2, 0, 2, 0)));
}
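/* Note on the deinterleave helpers above: the odd-lane ("hi") variants use
   an arithmetic right shift followed by a saturating pack, which is
   lossless here because the shifted values already fit the narrower type;
   the 32-bit variants reuse the floating-point shuffle unit via
   _mm_shuffle_ps. */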
SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
  return _mm_unpacklo_epi8(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
  return _mm_unpacklo_epi8(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
  return _mm_unpackhi_epi8(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
  return _mm_packs_epi32(b, a);
}
SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
  return _mm_packus_epi16(b, a);
}
SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
  return _mm_packs_epi16(b, a);
}
SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
  return _mm_unpacklo_epi16(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
  return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16);
}
SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
  return _mm_unpacklo_epi16(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
  return _mm_srai_epi32(_mm_unpacklo_epi16(a, a), 16);
}
SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
  return _mm_unpackhi_epi16(a, _mm_setzero_si128());
}
SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
  return _mm_srai_epi32(_mm_unpackhi_epi16(a, a), 16);
}
SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
#if defined(__SSSE3__)
  return _mm_shuffle_epi8(x, pattern);
#else
  v128 output;
  unsigned char *input = (unsigned char *)&x;
  unsigned char *index = (unsigned char *)&pattern;
  char *selected = (char *)&output;
  int counter;
  for (counter = 0; counter < 16; counter++) {
    selected[counter] = input[index[counter] & 15];
  }
  return output;
#endif
}
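/* Note: the scalar fallback above masks each index to 4 bits, so it matches
   _mm_shuffle_epi8 only for pattern bytes in the range 0..15; it does not
   replicate pshufb's zeroing of lanes whose index byte has the high bit
   set. */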
SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
  v128 r = _mm_madd_epi16(a, b);
#if defined(__SSE4_1__)
  v128 c = _mm_add_epi64(_mm_cvtepi32_epi64(r),
                         _mm_cvtepi32_epi64(_mm_srli_si128(r, 8)));
  return _mm_cvtsi128_si64(_mm_add_epi64(c, _mm_srli_si128(c, 8)));
#else
  return (int64_t)_mm_cvtsi128_si32(r) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 4)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 8)) +
         (int64_t)_mm_cvtsi128_si32(_mm_srli_si128(r, 12));
#endif
}
SIMD_INLINE uint64_t v128_hadd_u8(v128 a) {
  v128 t = _mm_sad_epu8(a, _mm_setzero_si128());
  return v64_low_u32(v128_low_v64(t)) + v64_low_u32(v128_high_v64(t));
}
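/* Note: _mm_sad_epu8 against zero leaves one partial byte sum in each
   64-bit half of the register; adding the two low 32-bit words gives the
   total (at most 16 * 255, so no overflow). */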
typedef v128 sad128_internal;
SIMD_INLINE sad128_internal v128_sad_u8_init() { return _mm_setzero_si128(); }
/* Implementation dependent return value. Result must be finalised with
   v128_sad_u8_sum().
   The result for more than 32 v128_sad_u8() calls is undefined. */
SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {