Commit 86b36d92 authored by Erik de Castro Lopo

libFLAC: Refactoring

No functional changes.

Patch-from: lvqcl <lvqcl.mail@gmail.com>
parent 239843e5
@@ -3984,29 +3984,25 @@ void precompute_partition_info_sums_(
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
-		/* WATCHOUT: "+ bps + FLAC__MAX_EXTRA_RESIDUAL_BPS" is the maximum
-		 * assumed size of the average residual magnitude */
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
-			FLAC__uint32 abs_residual_partition_sum;
+		/* WATCHOUT: "bps + FLAC__MAX_EXTRA_RESIDUAL_BPS" is the maximum assumed size of the average residual magnitude */
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				FLAC__uint32 abs_residual_partition_sum = 0;
 				end += default_partition_samples;
-				abs_residual_partition_sum = 0;
 				for( ; residual_sample < end; residual_sample++)
 					abs_residual_partition_sum += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
 				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
 			}
 		}
 		else { /* have to pessimistically use 64 bits for accumulator */
-			FLAC__uint64 abs_residual_partition_sum;
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				FLAC__uint64 abs_residual_partition_sum64 = 0;
 				end += default_partition_samples;
-				abs_residual_partition_sum = 0;
 				for( ; residual_sample < end; residual_sample++)
-					abs_residual_partition_sum += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
-				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+					abs_residual_partition_sum64 += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+				abs_residual_partition_sums[partition] = abs_residual_partition_sum64;
 			}
 		}
 	}
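Both changes in the hunk above are scope cleanups: the accumulators move from function scope into the partition loop (initialised at declaration), and the overflow guard ilog2(n) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32 is rewritten as bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold, with threshold = 32 - ilog2(n) hoisted into a const. The guard matters because a partition of n samples, each of magnitude below 2^(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS), sums to less than 2^(ilog2(n) + 1 + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS), which the test keeps inside a 32-bit accumulator. Below is a minimal standalone sketch (mine, not libFLAC code; ilog2() is a stand-in for FLAC__bitmath_ilog2()) that checks the two forms of the guard agree:

#include <assert.h>

/* stand-in for FLAC__bitmath_ilog2(): floor(log2(v)), v > 0 */
static unsigned ilog2(unsigned v)
{
	unsigned l = 0;
	while(v >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned n, s;
	for(n = 1; n < (1u << 20); n++) {
		const unsigned threshold = 32 - ilog2(n); /* as in the new code */
		/* s plays the role of bps + FLAC__MAX_EXTRA_RESIDUAL_BPS */
		for(s = 0; s < 64; s++)
			assert((ilog2(n) + s < 32) == (s < threshold));
	}
	return 0;
}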
@@ -55,29 +55,29 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
-		__m256i res256, sum256;
-		__m128i res128, sum128;
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m256i sum256 = _mm256_setzero_si256();
+				__m128i sum128;
 				end += default_partition_samples;
-				sum256 = _mm256_setzero_si256();
 				for( ; (int)residual_sample < (int)end-7; residual_sample+=8) {
-					res256 = _mm256_abs_epi32(_mm256_loadu_si256((const __m256i*)(residual+residual_sample)));
+					__m256i res256 = _mm256_abs_epi32(_mm256_loadu_si256((const __m256i*)(residual+residual_sample)));
 					sum256 = _mm256_add_epi32(sum256, res256);
 				}
 				sum128 = _mm_add_epi32(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
 				for( ; (int)residual_sample < (int)end-3; residual_sample+=4) {
-					res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
+					__m128i res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
 					sum128 = _mm_add_epi32(sum128, res128);
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					res128 = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i res128 = _mm_cvtsi32_si128(residual[residual_sample]);
 					res128 = _mm_abs_epi32(res128);
 					sum128 = _mm_add_epi32(sum128, res128);
 				}
@@ -89,26 +89,27 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 		}
 		else { /* have to pessimistically use 64 bits for accumulator */
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m256i sum256 = _mm256_setzero_si256();
+				__m128i sum128;
 				end += default_partition_samples;
-				sum256 = _mm256_setzero_si256();
 				for( ; (int)residual_sample < (int)end-3; residual_sample+=4) {
-					res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
-					res256 = _mm256_cvtepu32_epi64(res128);
+					__m128i res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
+					__m256i res256 = _mm256_cvtepu32_epi64(res128);
 					sum256 = _mm256_add_epi64(sum256, res256);
 				}
 				sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
 				for( ; (int)residual_sample < (int)end-1; residual_sample+=2) {
-					res128 = _mm_loadl_epi64((const __m128i*)(residual+residual_sample));
+					__m128i res128 = _mm_loadl_epi64((const __m128i*)(residual+residual_sample));
 					res128 = _mm_abs_epi32(res128);
 					res128 = _mm_cvtepu32_epi64(res128);
 					sum128 = _mm_add_epi64(sum128, res128);
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					res128 = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i res128 = _mm_cvtsi32_si128(residual[residual_sample]);
 					res128 = _mm_abs_epi32(res128);
 					sum128 = _mm_add_epi64(sum128, res128);
 				}
@@ -55,14 +55,14 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
-		unsigned e1, e3;
-		__m128i mm_res, mm_sum, mm_mask;
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m128i mm_sum = _mm_setzero_si128();
+				unsigned e1, e3;
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
 				if(e1 > end)
@@ -70,24 +70,24 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 				for( ; residual_sample < e3; residual_sample+=4) {
-					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
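The SSE2 file cannot use _mm_abs_epi32 (that is SSSE3), so it builds absolute values from the sign mask: mm_mask = x >> 31 is all-ones exactly in the negative lanes, and (x ^ mask) - mask is then ~x + 1 == -x for those lanes and a no-op for the rest. A scalar sketch of the same identity (mine, not libFLAC; it assumes arithmetic right shift on int32_t, which _mm_srai_epi32 guarantees in the vector form and which common compilers do for signed types):

#include <assert.h>
#include <stdint.h>

static int32_t abs_branchfree(int32_t x)
{
	const int32_t mask = x >> 31; /* scalar twin of _mm_srai_epi32(x, 31) */
	return (x ^ mask) - mask;     /* _mm_xor_si128 then _mm_sub_epi32 */
}

int main(void)
{
	int32_t x;
	/* x == INT_MIN is excluded, matching the diff's own INT_MIN caveat */
	for(x = -100000; x <= 100000; x++)
		assert(abs_branchfree(x) == (x < 0 ? -x : x));
	return 0;
}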
@@ -100,23 +100,24 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 		}
 		else { /* have to pessimistically use 64 bits for accumulator */
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m128i mm_sum = _mm_setzero_si128();
+				unsigned e1, e3;
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
 				FLAC__ASSERT(e1 <= end);
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /* 0 0 0 r0 */
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /* 0 0 0 r0 */
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask); /* 0 0 0 |r0| == 00 |r0_64| */
 					mm_sum = _mm_add_epi64(mm_sum, mm_res);
 				}
 				for( ; residual_sample < e3; residual_sample+=2) {
-					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask); /* 0 0 |r1| |r0| */
 					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
@@ -124,8 +125,8 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-					mm_mask = _mm_srai_epi32(mm_res, 31);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask);
 					mm_sum = _mm_add_epi64(mm_sum, mm_res);
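Each partition in the SSE2/SSSE3 variants is also split into three loops: a scalar head up to the next 4-sample (or 2-sample) boundary e1, a vector body up to the rounded-down end e3, and a scalar tail. The sketch below is a hypothetical sum_abs_split() of my own, not libFLAC; it mirrors the visible structure, including a clamp for the e1 > end case of very short ranges, using the same SSE2 abs sequence:

#include <emmintrin.h>
#include <stdint.h>

static uint32_t sum_abs_split(const int32_t *residual, unsigned start, unsigned end)
{
	unsigned i;
	unsigned e1 = (start + 3) & ~3u, e3 = end & ~3u;
	uint32_t head_tail = 0;
	__m128i mm_sum = _mm_setzero_si128();
	if(e1 > end)
		e1 = end; /* tiny range: everything is handled by the scalar head */
	for(i = start; i < e1; i++) /* scalar head, up to 4-sample alignment */
		head_tail += (uint32_t)(residual[i] < 0 ? -residual[i] : residual[i]);
	for( ; i < e3; i += 4) { /* vector body */
		__m128i mm_res = _mm_loadu_si128((const __m128i*)(residual + i));
		__m128i mm_mask = _mm_srai_epi32(mm_res, 31);
		mm_res = _mm_sub_epi32(_mm_xor_si128(mm_res, mm_mask), mm_mask);
		mm_sum = _mm_add_epi32(mm_sum, mm_res);
	}
	for( ; i < end; i++) /* scalar tail */
		head_tail += (uint32_t)(residual[i] < 0 ? -residual[i] : residual[i]);
	/* fold 4 lanes to 1 */
	mm_sum = _mm_add_epi32(mm_sum, _mm_shuffle_epi32(mm_sum, _MM_SHUFFLE(1,0,3,2)));
	mm_sum = _mm_add_epi32(mm_sum, _mm_shuffle_epi32(mm_sum, _MM_SHUFFLE(2,3,0,1)));
	return head_tail + (uint32_t)_mm_cvtsi128_si32(mm_sum);
}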
@@ -55,14 +55,14 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
-		unsigned e1, e3;
-		__m128i mm_res, mm_sum;
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m128i mm_sum = _mm_setzero_si128();
+				unsigned e1, e3;
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
 				if(e1 > end)
@@ -70,19 +70,19 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_res = _mm_abs_epi32(mm_res); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 				for( ; residual_sample < e3; residual_sample+=4) {
-					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
+					__m128i mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
 					mm_res = _mm_abs_epi32(mm_res);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_res = _mm_abs_epi32(mm_res);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
@@ -94,27 +94,28 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 		}
 		else { /* have to pessimistically use 64 bits for accumulator */
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				__m128i mm_sum = _mm_setzero_si128();
+				unsigned e1, e3;
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
 				FLAC__ASSERT(e1 <= end);
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /* 0 0 0 r0 */
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /* 0 0 0 r0 */
 					mm_res = _mm_abs_epi32(mm_res); /* 0 0 0 |r0| == 00 |r0_64| */
 					mm_sum = _mm_add_epi64(mm_sum, mm_res);
 				}
 				for( ; residual_sample < e3; residual_sample+=2) {
-					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
+					__m128i mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
 					mm_res = _mm_abs_epi32(mm_res); /* 0 0 |r1| |r0| */
 					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
 					mm_sum = _mm_add_epi64(mm_sum, mm_res);
 				}
 				for( ; residual_sample < end; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_res = _mm_abs_epi32(mm_res);
 					mm_sum = _mm_add_epi64(mm_sum, mm_res);
 				}
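In the 64-bit SSE2/SSSE3 paths, the pair of 32-bit absolute values 0 0 |r1| |r0| is widened without SSE4.1's _mm_cvtepu32_epi64: _mm_shuffle_epi32 with _MM_SHUFFLE(3,1,2,0) interleaves the zero lanes to give 0 |r1| 0 |r0|, i.e. two zero-extended 64-bit lanes that _mm_add_epi64 can accumulate directly. A minimal demonstration of just that step (compile with -mssse3; not libFLAC code):

#include <emmintrin.h>  /* SSE2 */
#include <tmmintrin.h>  /* SSSE3: _mm_abs_epi32 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t r[2] = { -3, 7 };
	uint64_t out[2];
	__m128i v = _mm_loadl_epi64((const __m128i*)r);  /* 0 0 r1 r0 */
	v = _mm_abs_epi32(v);                            /* 0 0 |r1| |r0| */
	v = _mm_shuffle_epi32(v, _MM_SHUFFLE(3,1,2,0));  /* 0 |r1| 0 |r0| */
	__m128i sum = _mm_add_epi64(_mm_setzero_si128(), v);
	_mm_storeu_si128((__m128i*)out, sum);
	printf("%llu %llu\n", (unsigned long long)out[0], (unsigned long long)out[1]); /* 3 7 */
	return 0;
}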