Commit 427048f0 authored by Josh Coalson

streamline the residual energy calculation

parent b5cc55f3
@@ -65,11 +65,19 @@ unsigned FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], unsigned d
 	else
 		order = 4;
-	residual_bits_per_sample[0] = (FLAC__real)((data_len > 0 && total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[1] = (FLAC__real)((data_len > 0 && total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[2] = (FLAC__real)((data_len > 0 && total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[3] = (FLAC__real)((data_len > 0 && total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[4] = (FLAC__real)((data_len > 0 && total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double) data_len) / M_LN2 : 0.0);
+	/* Estimate the expected number of bits per residual signal sample. */
+	/* 'total_error*' is linearly related to the variance of the residual */
+	/* signal, so we use it directly to compute E(|x|) */
+	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
+	residual_bits_per_sample[0] = (FLAC__real)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[1] = (FLAC__real)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[2] = (FLAC__real)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[3] = (FLAC__real)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[4] = (FLAC__real)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
 	return order;
 }
@@ -110,19 +118,24 @@ unsigned FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], unsig
 	/* Estimate the expected number of bits per residual signal sample. */
 	/* 'total_error*' is linearly related to the variance of the residual */
 	/* signal, so we use it directly to compute E(|x|) */
+	FLAC__ASSERT(data_len > 0 || total_error_0 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_1 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
+	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
 #if defined _MSC_VER || defined __MINGW32__
 	/* with VC++ you have to spoon feed it the casting */
-	residual_bits_per_sample[0] = (FLAC__real)((data_len > 0 && total_error_0 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_0 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[1] = (FLAC__real)((data_len > 0 && total_error_1 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_1 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[2] = (FLAC__real)((data_len > 0 && total_error_2 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_2 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[3] = (FLAC__real)((data_len > 0 && total_error_3 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_3 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[4] = (FLAC__real)((data_len > 0 && total_error_4 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_4 / (double) data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[0] = (FLAC__real)((total_error_0 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_0 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[1] = (FLAC__real)((total_error_1 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_1 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[2] = (FLAC__real)((total_error_2 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_2 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[3] = (FLAC__real)((total_error_3 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_3 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[4] = (FLAC__real)((total_error_4 > 0) ? log(M_LN2 * (double)(FLAC__int64)total_error_4 / (double)data_len) / M_LN2 : 0.0);
 #else
-	residual_bits_per_sample[0] = (FLAC__real)((data_len > 0 && total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[1] = (FLAC__real)((data_len > 0 && total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[2] = (FLAC__real)((data_len > 0 && total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[3] = (FLAC__real)((data_len > 0 && total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double) data_len) / M_LN2 : 0.0);
-	residual_bits_per_sample[4] = (FLAC__real)((data_len > 0 && total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double) data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[0] = (FLAC__real)((total_error_0 > 0) ? log(M_LN2 * (double)total_error_0 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[1] = (FLAC__real)((total_error_1 > 0) ? log(M_LN2 * (double)total_error_1 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[2] = (FLAC__real)((total_error_2 > 0) ? log(M_LN2 * (double)total_error_2 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[3] = (FLAC__real)((total_error_3 > 0) ? log(M_LN2 * (double)total_error_3 / (double)data_len) / M_LN2 : 0.0);
+	residual_bits_per_sample[4] = (FLAC__real)((total_error_4 > 0) ? log(M_LN2 * (double)total_error_4 / (double)data_len) / M_LN2 : 0.0);
 #endif
 	return order;
......
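A note on the expression both hunks compute: each total_error_n is the sum of absolute residuals for the order-n fixed predictor, so total_error_n / data_len approximates the mean absolute residual E(|x|), and the estimated cost is log2(ln(2) * E(|x|)) bits per sample. The standalone sketch below restates that calculation; the helper name and its double-typed parameter are illustrative only and not part of the libFLAC API.

#include <math.h>

/* Hypothetical helper, not part of libFLAC: restates the estimate used in
 * the diff above. 'total_error' is the sum of |residual| over 'data_len'
 * samples, so total_error / data_len approximates E(|x|). */
static double estimate_bits_per_sample(double total_error, unsigned data_len)
{
	/* Callers assert that data_len > 0 whenever total_error > 0. */
	if (total_error <= 0.0)
		return 0.0;
	return log(M_LN2 * total_error / (double)data_len) / M_LN2; /* log2(ln(2) * E(|x|)) */
}

The FLAC__ASSERT lines added in both hunks guarantee exactly that precondition (a nonzero total_error_n implies data_len > 0), which is what lets the streamlined conditionals drop the per-element data_len check.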