Commit 032573dd authored by Yaowu Xu, committed by Fred BARBIER

Cleanup: remove const

Similar to the previous commit, this cleanup removes more "const"
qualifiers from parameters passed by value.

BUG=aomedia:448

Change-Id: I092bcbeecab75f0c14c3ee60d34dcf6f69034fe4
parent c30934bc
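
Background, not part of the patch: in C, a top-level "const" on a parameter passed by value is not part of the function's type, so removing it changes nothing for callers or for the ABI; it only controlled whether the function body could modify its local copy. A minimal sketch with a hypothetical function:

    /* Compatible declarations: for type compatibility C treats each
       parameter as having its unqualified type (C11 6.7.6.3p15), so
       the const below only makes n read-only inside the body. */
    int times_eight(unsigned int n);
    int times_eight(const unsigned int n) { return (int)(n * 8u); }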
@@ -49,7 +49,7 @@ SIMD_INLINE void v128_store_aligned(void *p, v128 a) {
   c_v128_store_aligned(p, a);
 }
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
   return c_v128_align(a, b, c);
 }
@@ -231,37 +231,37 @@ SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
   return c_v128_shr_s32(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
   return c_v128_shr_n_byte(a, n);
 }
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
   return c_v128_shl_n_byte(a, n);
 }
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int n) {
   return c_v128_shl_n_8(a, n);
 }
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int n) {
   return c_v128_shl_n_16(a, n);
 }
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int n) {
   return c_v128_shl_n_32(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int n) {
   return c_v128_shr_n_u8(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int n) {
   return c_v128_shr_n_u16(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int n) {
   return c_v128_shr_n_u32(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int n) {
   return c_v128_shr_n_s8(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int n) {
   return c_v128_shr_n_s16(a, n);
 }
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int n) {
   return c_v128_shr_n_s32(a, n);
 }
......
@@ -51,7 +51,7 @@ SIMD_INLINE void v128_store_unaligned(void *p, v128 r) {
   vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
 }
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
 // The following functions require an immediate.
 // Some compilers will check this during optimisation, others won't.
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
@@ -541,7 +541,7 @@ SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
   return n < 8
              ? v128_from_64(
                    (uint64_t)vorr_u64(
@@ -559,7 +559,7 @@ SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
                    0));
 }
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
   return n < 8
              ? v128_from_64(
                    vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
@@ -574,45 +574,45 @@ SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
                    (n - 8) * 8)));
 }
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
   return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
   return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
   return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
 }
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
   return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
   return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
   return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
 }
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
   return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
   return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
 }
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
   return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
 }
 #else
-SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
   if (n < 8)
     return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
                                 v64_shr_n_byte(v128_low_v64(a), 8 - n)),
@@ -621,7 +621,7 @@ SIMD_INLINE v128 v128_shl_n_byte(v128 a, const unsigned int n) {
   return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
 }
-SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
+SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
   if (n < 8)
     return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
                          v64_or(v64_shr_n_byte(v128_low_v64(a), n),
@@ -630,39 +630,39 @@ SIMD_INLINE v128 v128_shr_n_byte(v128 a, const unsigned int n) {
   return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
 }
-SIMD_INLINE v128 v128_shl_n_8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
   return v128_shl_8(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_u8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
   return v128_shr_u8(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_s8(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
   return v128_shr_s8(a, c);
 }
-SIMD_INLINE v128 v128_shl_n_16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
   return v128_shl_16(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_u16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
   return v128_shr_u16(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_s16(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
   return v128_shr_s16(a, c);
 }
-SIMD_INLINE v128 v128_shl_n_32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
   return v128_shl_32(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_u32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
   return v128_shr_u32(a, c);
 }
-SIMD_INLINE v128 v128_shr_n_s32(v128 a, const unsigned int c) {
+SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
   return v128_shr_s32(a, c);
 }
......
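
The ARM hunks above keep two implementations behind "#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)" because NEON _n_ intrinsics such as vshlq_n_u8() require a compile-time immediate shift count: the guarded path relies on the compiler folding the inlined argument to a constant, while the fallback path uses variable-shift forms. A sketch of the same dispatch, assuming a hypothetical wrapper name (my_shl_n_8 is not part of the library):

    #include <arm_neon.h>

    #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
    /* Immediate form: vshlq_n_u8 needs c to fold to a constant after
       inlining, which these compilers do when optimising. */
    #define my_shl_n_8(a, c) vshlq_n_u8((a), (c))
    #else
    /* Variable form: vshlq_u8 shifts each lane by an amount held in a
       vector, so c may be a runtime value; vdupq_n_s8 broadcasts it. */
    #define my_shl_n_8(a, c) vshlq_u8((a), vdupq_n_s8((int8_t)(c)))
    #endif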
@@ -601,7 +601,7 @@ SIMD_INLINE c_v128 c_v128_cmpeq_16(c_v128 a, c_v128 b) {
                          c_v64_cmpeq_16(a.v64[0], b.v64[0]));
 }
-SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, unsigned int n) {
   if (n < 8)
     return c_v128_from_v64(c_v64_or(c_v64_shl_n_byte(a.v64[1], n),
                                     c_v64_shr_n_byte(a.v64[0], 8 - n)),
@@ -610,7 +610,7 @@ SIMD_INLINE c_v128 c_v128_shl_n_byte(c_v128 a, const unsigned int n) {
   return c_v128_from_v64(c_v64_shl_n_byte(a.v64[0], n - 8), c_v64_zero());
 }
-SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, unsigned int n) {
   if (n < 8)
     return c_v128_from_v64(c_v64_shr_n_byte(a.v64[1], n),
                            c_v64_or(c_v64_shr_n_byte(a.v64[0], n),
@@ -619,7 +619,7 @@ SIMD_INLINE c_v128 c_v128_shr_n_byte(c_v128 a, const unsigned int n) {
   return c_v128_from_v64(c_v64_zero(), c_v64_shr_n_byte(a.v64[1], n - 8));
 }
-SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, unsigned int c) {
   if (SIMD_CHECK && c > 15) {
     fprintf(stderr, "Error: undefined alignment %d\n", c);
     abort();
@@ -628,79 +628,79 @@ SIMD_INLINE c_v128 c_v128_align(c_v128 a, c_v128 b, const unsigned int c) {
                 : b;
 }
-SIMD_INLINE c_v128 c_v128_shl_8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_8(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shl_8(a.v64[1], c), c_v64_shl_8(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_u8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u8(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_u8(a.v64[1], c), c_v64_shr_u8(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_s8(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s8(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_s8(a.v64[1], c), c_v64_shr_s8(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shl_16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_16(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shl_16(a.v64[1], c), c_v64_shl_16(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_u16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u16(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_u16(a.v64[1], c),
                          c_v64_shr_u16(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_s16(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s16(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_s16(a.v64[1], c),
                          c_v64_shr_s16(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shl_32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shl_32(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shl_32(a.v64[1], c), c_v64_shl_32(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_u32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_u32(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_u32(a.v64[1], c),
                          c_v64_shr_u32(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shr_s32(c_v128 a, const unsigned int c) {
+SIMD_INLINE c_v128 c_v128_shr_s32(c_v128 a, unsigned int c) {
   return c_v128_from_v64(c_v64_shr_s32(a.v64[1], c),
                          c_v64_shr_s32(a.v64[0], c));
 }
-SIMD_INLINE c_v128 c_v128_shl_n_8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_8(c_v128 a, unsigned int n) {
   return c_v128_shl_8(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shl_n_16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_16(c_v128 a, unsigned int n) {
   return c_v128_shl_16(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shl_n_32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shl_n_32(c_v128 a, unsigned int n) {
   return c_v128_shl_32(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_u8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u8(c_v128 a, unsigned int n) {
   return c_v128_shr_u8(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_u16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u16(c_v128 a, unsigned int n) {
   return c_v128_shr_u16(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_u32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_u32(c_v128 a, unsigned int n) {
   return c_v128_shr_u32(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_s8(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s8(c_v128 a, unsigned int n) {
   return c_v128_shr_s8(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_s16(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s16(c_v128 a, unsigned int n) {
   return c_v128_shr_s16(a, n);
 }
-SIMD_INLINE c_v128 c_v128_shr_n_s32(c_v128 a, const unsigned int n) {
+SIMD_INLINE c_v128 c_v128_shr_n_s32(c_v128 a, unsigned int n) {
   return c_v128_shr_s32(a, n);
 }
......
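
The c_v128 reference code above implements a 128-bit byte shift on two 64-bit halves: for n < 8 the halves exchange bits, and for n >= 8 the low half moves wholesale into the high half. The same logic on plain integers, as a sketch (the u128 type and helper are illustrative, not library code):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128; /* illustrative only */

    /* Shift left by n bytes, valid for 0 <= n <= 15 (the reference
       implementation aborts outside that range under SIMD_CHECK). */
    static u128 u128_shl_n_byte(u128 a, unsigned int n) {
      u128 r;
      if (n == 0) return a; /* avoids an undefined 64-bit shift below */
      if (n < 8) {
        r.hi = (a.hi << (n * 8)) | (a.lo >> ((8 - n) * 8));
        r.lo = a.lo << (n * 8);
      } else { /* the low half moves entirely into the high half */
        r.hi = a.lo << ((n - 8) * 8);
        r.lo = 0;
      }
      return r;
    }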
@@ -62,7 +62,7 @@ SIMD_INLINE void v128_store_unaligned(void *p, v128 a) {
 // Some compilers will check this during optimisation, others won't.
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
 #if defined(__SSSE3__)
-SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) {
+SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
   return c ? _mm_alignr_epi8(a, b, c) : b;
 }
 #else
......
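
For reference, v128_align(a, b, c) in the hunk above returns bytes c..c+15 of the 32-byte concatenation with b as the low half and a as the high half, which is what _mm_alignr_epi8(a, b, c) computes for shifts up to 16. A scalar model, assuming little-endian byte order (the helper name is hypothetical):

    #include <stdint.h>
    #include <string.h>

    /* r = bytes c..c+15 of the concatenation b (low) : a (high), for
       c in 0..15; the C reference aborts for larger values. */
    static void align_bytes_model(uint8_t r[16], const uint8_t a[16],
                                  const uint8_t b[16], unsigned int c) {
      uint8_t tmp[32];
      memcpy(tmp, b, 16);      /* low 16 bytes */
      memcpy(tmp + 16, a, 16); /* high 16 bytes */
      memcpy(r, tmp + c, 16);
    }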
@@ -51,7 +51,7 @@ SIMD_INLINE void v256_store_aligned(void *p, v256 a) {
   c_v256_store_aligned(p, a);
 }
-SIMD_INLINE v256 v256_align(v256 a, v256 b, const unsigned int c) {
+SIMD_INLINE v256 v256_align(v256 a, v256 b, unsigned int c) {
   return c_v256_align(a, b, c);
 }
@@ -246,37 +246,37 @@ SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) {
   return c_v256_shr_s32(a, c);
 }
-SIMD_INLINE v256 v256_shr_n_byte(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_byte(v256 a, unsigned int n) {
   return c_v256_shr_n_byte(a, n);
 }
-SIMD_INLINE v256 v256_shl_n_byte(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_byte(v256 a, unsigned int n) {
   return c_v256_shl_n_byte(a, n);
 }
-SIMD_INLINE v256 v256_shl_n_8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_8(v256 a, unsigned int n) {
   return c_v256_shl_n_8(a, n);
 }
-SIMD_INLINE v256 v256_shl_n_16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_16(v256 a, unsigned int n) {
   return c_v256_shl_n_16(a, n);
 }
-SIMD_INLINE v256 v256_shl_n_32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shl_n_32(v256 a, unsigned int n) {
   return c_v256_shl_n_32(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_u8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u8(v256 a, unsigned int n) {
   return c_v256_shr_n_u8(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_u16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u16(v256 a, unsigned int n) {
   return c_v256_shr_n_u16(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_u32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_u32(v256 a, unsigned int n) {
   return c_v256_shr_n_u32(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_s8(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s8(v256 a, unsigned int n) {
   return c_v256_shr_n_s8(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_s16(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s16(v256 a, unsigned int n) {
   return c_v256_shr_n_s16(a, n);
 }
-SIMD_INLINE v256 v256_shr_n_s32(v256 a, const unsigned int n) {
+SIMD_INLINE v256 v256_shr_n_s32(v256 a, unsigned int n) {
   return c_v256_shr_n_s32(a, n);
 }
......
@@ -607,7 +607,7 @@ SIMD_INLINE c_v256 c_v256_cmpeq_16(c_v256 a, c_v256 b) {
                           c_v128_cmpeq_16(a.v128[0], b.v128[0]));
 }
-SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, unsigned int n) {
   if (n < 16)
     return c_v256_from_v128(c_v128_or(c_v128_shl_n_byte(a.v128[1], n),
                                       c_v128_shr_n_byte(a.v128[0], 16 - n)),
@@ -619,7 +619,7 @@ SIMD_INLINE c_v256 c_v256_shl_n_byte(c_v256 a, const unsigned int n) {
   return c_v256_from_v128(c_v256_low_v128(a), c_v128_zero());
 }
-SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, unsigned int n) {
   if (n < 16)
     return c_v256_from_v128(c_v128_shr_n_byte(a.v128[1], n),
                             c_v128_or(c_v128_shr_n_byte(a.v128[0], n),
@@ -631,7 +631,7 @@ SIMD_INLINE c_v256 c_v256_shr_n_byte(c_v256 a, const unsigned int n) {
   return c_v256_from_v128(c_v128_zero(), c_v256_high_v128(a));
 }
-SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, unsigned int c) {
   if (SIMD_CHECK && c > 31) {
     fprintf(stderr, "Error: undefined alignment %d\n", c);
     abort();
@@ -640,84 +640,84 @@ SIMD_INLINE c_v256 c_v256_align(c_v256 a, c_v256 b, const unsigned int c) {
                 : b;
 }
-SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_8(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shl_8(a.v128[1], c),
                           c_v128_shl_8(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u8(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_u8(a.v128[1], c),
                           c_v128_shr_u8(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s8(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_s8(a.v128[1], c),
                           c_v128_shr_s8(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_16(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shl_16(a.v128[1], c),
                           c_v128_shl_16(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u16(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_u16(a.v128[1], c),
                           c_v128_shr_u16(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s16(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_s16(a.v128[1], c),
                           c_v128_shr_s16(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shl_32(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shl_32(a.v128[1], c),
                           c_v128_shl_32(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_u32(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_u32(a.v128[1], c),
                           c_v128_shr_u32(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, const unsigned int c) {
+SIMD_INLINE c_v256 c_v256_shr_s32(c_v256 a, unsigned int c) {
   return c_v256_from_v128(c_v128_shr_s32(a.v128[1], c),
                           c_v128_shr_s32(a.v128[0], c));
 }
-SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_8(c_v256 a, unsigned int n) {
   return c_v256_shl_8(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_16(c_v256 a, unsigned int n) {
   return c_v256_shl_16(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shl_n_32(c_v256 a, unsigned int n) {
   return c_v256_shl_32(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u8(c_v256 a, unsigned int n) {
   return c_v256_shr_u8(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u16(c_v256 a, unsigned int n) {
   return c_v256_shr_u16(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_u32(c_v256 a, unsigned int n) {
   return c_v256_shr_u32(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s8(c_v256 a, unsigned int n) {
   return c_v256_shr_s8(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s16(c_v256 a, unsigned int n) {
   return c_v256_shr_s16(a, n);
 }
-SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, const unsigned int n) {
+SIMD_INLINE c_v256 c_v256_shr_n_s32(c_v256 a, unsigned int n) {
   return c_v256_shr_s32(a, n);
 }
......
@@ -468,39 +468,39 @@ SIMD_INLINE v256 v256_cmpeq_16(v256 a, v256 b) {
   return v256_from_v128(v128_cmpeq_16(a.hi, b.hi), v128_cmpeq_16(a.lo, b.lo));
 }
-SIMD_INLINE v256 v256_shl_8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_8(v256 a, unsigned int c) {
   return v256_from_v128(v128_shl_8(a.hi, c), v128_shl_8(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_u8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u8(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_u8(a.hi, c), v128_shr_u8(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_s8(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s8(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_s8(a.hi, c), v128_shr_s8(a.lo, c));
 }
-SIMD_INLINE v256 v256_shl_16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_16(v256 a, unsigned int c) {
   return v256_from_v128(v128_shl_16(a.hi, c), v128_shl_16(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_u16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u16(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_u16(a.hi, c), v128_shr_u16(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_s16(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s16(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_s16(a.hi, c), v128_shr_s16(a.lo, c));
 }
-SIMD_INLINE v256 v256_shl_32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shl_32(v256 a, unsigned int c) {
   return v256_from_v128(v128_shl_32(a.hi, c), v128_shl_32(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_u32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_u32(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_u32(a.hi, c), v128_shr_u32(a.lo, c));
 }
-SIMD_INLINE v256 v256_shr_s32(v256 a, const unsigned int c) {
+SIMD_INLINE v256 v256_shr_s32(v256 a, unsigned int c) {
   return v256_from_v128(v128_shr_s32(a.hi, c), v128_shr_s32(a.lo, c));
 }
......
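
The v256-over-two-v128 hunks above work because every op in this group is a per-lane shift (8-, 16-, or 32-bit lanes), so no bits ever cross the boundary between the two 128-bit halves and the wide op is simply the narrow op applied to each half. A sketch of the pattern, assuming the v128 helpers declared in these headers (my_v256 is an illustrative struct, not the library's type):

    /* Per-lane shifts cannot move bits between halves, so each half is
       processed independently. */
    typedef struct { v128 lo, hi; } my_v256;

    static my_v256 my_v256_shr_u16(my_v256 a, unsigned int c) {
      my_v256 r = { v128_shr_u16(a.lo, c), v128_shr_u16(a.hi, c) };
      return r;
    }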
@@ -60,9 +60,7 @@ SIMD_INLINE void v64_store_aligned(void *p, v64 a) {
   c_v64_store_aligned(p, a);
 }
-SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
-  return c_v64_align(a, b, c);
-}
+SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) { return c_v64_align(a, b, c); }
 SIMD_INLINE v64 v64_zero() { return c_v64_zero(); }
 SIMD_INLINE v64 v64_dup_8(uint8_t x) { return c_v64_dup_8(x); }
@@ -188,37 +186,37 @@ SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int n) {
 SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int n) {
   return c_v64_shr_s32(a, n);
 }
-SIMD_INLINE v64 v64_shr_n_byte(v64 a, const unsigned int n) {
+SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int n) {
   return c_v64_shr_n_byte(a, n);
 }
-SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int n) {
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int n) {
   return c_v64_shl_n_byte(a, n);
 }
-SIMD_INLINE v64 v64_shl_n_8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
   return c_v64_shl_n_8(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_u8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
   return c_v64_shr_n_u8(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_s8(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
   return c_v64_shr_n_s8(a, c);
 }
-SIMD_INLINE v64 v64_shl_n_16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
   return c_v64_shl_n_16(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_u16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
   return c_v64_shr_n_u16(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_s16(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
   return c_v64_shr_n_s16(a, c);
 }
-SIMD_INLINE v64 v64_shl_n_32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
   return c_v64_shl_n_32(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_u32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
   return c_v64_shr_n_u32(a, c);
 }
-SIMD_INLINE v64 v64_shr_n_s32(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
   return c_v64_shr_n_s32(a, c);
 }
......
@@ -95,7 +95,7 @@ SIMD_INLINE void v64_store_unaligned(void *p, v64 r) {
 // The following function requires an immediate.
 // Some compilers will check this if it's optimising, others won't.
-SIMD_INLINE v64 v64_align(v64 a, v64 b, const unsigned int c) {
+SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
   return c ? vreinterpret_s64_s8(
                  vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
@@ -498,93 +498,83 @@ SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
 // Some compilers will check this during optimisation, others won't.
 #if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
-SIMD_INLINE v64 v64_shl_n_byte(v64 a, const unsigned int c) {
+SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
   return vshl_n_s64(a, c * 8);