Commit 1b888f2e authored by David Barker, committed by Debargha Mukherjee

Optimize SSE2 warp filter

Improve the speed of the warp filter itself by ~30%. This leads
to an overall decoder speedup of 5-20%, depending on bitrate,
for the global-motion experiment, and a small speedup for
warped-motion.

Applies a very minor change to the rounding during filter
selection (ROUND_POWER_OF_TWO makes slightly more sense here
than ROUND_POWER_OF_TWO_SIGNED, and is faster)
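
For reference, a minimal standalone sketch (not part of this patch) contrasting the two rounding macros, written against the usual aom_dsp-style definitions; the exact macro bodies and the 10-bit shift are assumptions used only to illustrate why the unsigned form is cheaper (no sign branch) and how the two differ for negative inputs:

#include <stdio.h>

/* Assumed to match the aom_dsp_common.h definitions; treat as illustrative. */
#define ROUND_POWER_OF_TWO(value, n) (((value) + ((1 << (n)) >> 1)) >> (n))
#define ROUND_POWER_OF_TWO_SIGNED(value, n) \
  (((value) < 0) ? -ROUND_POWER_OF_TWO(-(value), (n)) \
                 : ROUND_POWER_OF_TWO((value), (n)))

int main(void) {
  const int bits = 10; /* illustrative stand-in for WARPEDDIFF_PREC_BITS */
  /* The plain macro is one add plus one arithmetic shift (it relies on
     arithmetic right shift of negative values, as the codec code already
     does); the SIGNED macro adds a branch on the sign and rounds ties
     away from zero instead. */
  for (int sx = -2048; sx <= 2048; sx += 512) {
    printf("sx=%6d  plain=%3d  signed=%3d\n", sx,
           ROUND_POWER_OF_TWO(sx, bits), ROUND_POWER_OF_TWO_SIGNED(sx, bits));
  }
  return 0;
}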

Change-Id: I3f364221d1ec35a8aac0d2c8b0e427f527d12e43
parent 0b04e9b8
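
As context for the diff below: the new horizontal filter loads one 8-tap coefficient row per output pixel and then transposes four of those rows into per-tap registers with SSE2 unpack instructions (the "coeffs 0 1 0 1 ..." comments). A small standalone sketch of that shuffle pattern, using made-up tap values purely so the lanes are traceable; the row data and variable names here are illustrative, not taken from warped_filter:

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Four 8-tap rows; row p holds values p*10 + tap so each lane is traceable. */
  int16_t rows[4][8];
  for (int p = 0; p < 4; ++p)
    for (int t = 0; t < 8; ++t) rows[p][t] = (int16_t)(p * 10 + t);

  __m128i tmp_0 = _mm_loadu_si128((const __m128i *)rows[0]); /* pixel 0 */
  __m128i tmp_2 = _mm_loadu_si128((const __m128i *)rows[1]); /* pixel 2 */
  __m128i tmp_4 = _mm_loadu_si128((const __m128i *)rows[2]); /* pixel 4 */
  __m128i tmp_6 = _mm_loadu_si128((const __m128i *)rows[3]); /* pixel 6 */

  /* Same shuffle sequence as the even-pixel path of the horizontal filter. */
  __m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);   /* taps 0 1 0 1 2 3 2 3 (pixels 0, 2) */
  __m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);  /* taps 0 1 0 1 2 3 2 3 (pixels 4, 6) */
  __m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10); /* taps 0 1 for pixels 0, 2, 4, 6 */

  int16_t out[8];
  _mm_storeu_si128((__m128i *)out, coeff_0);
  /* Expected output: 0 1 10 11 20 21 30 31 */
  for (int t = 0; t < 8; ++t) printf("%d ", out[t]);
  printf("\n");
  return 0;
}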
@@ -992,8 +992,7 @@ void warp_affine_c(int32_t *mat, uint8_t *ref, int width, int height,
int ix = ix4 + l - 3;
// At this point, sx = sx4 + alpha * l + beta * k
const int16_t *coeffs =
warped_filter[ROUND_POWER_OF_TWO_SIGNED(sx,
WARPEDDIFF_PREC_BITS) +
warped_filter[ROUND_POWER_OF_TWO(sx, WARPEDDIFF_PREC_BITS) +
WARPEDPIXEL_PREC_SHIFTS];
int32_t sum = 0;
for (m = 0; m < 8; ++m) {
@@ -1012,9 +1011,9 @@ void warp_affine_c(int32_t *mat, uint8_t *ref, int width, int height,
uint8_t *p =
&pred[(i - p_row + k + 4) * p_stride + (j - p_col + l + 4)];
// At this point, sy = sy4 + gamma * l + delta * k
const int16_t *coeffs = warped_filter[ROUND_POWER_OF_TWO_SIGNED(
sy, WARPEDDIFF_PREC_BITS) +
WARPEDPIXEL_PREC_SHIFTS];
const int16_t *coeffs =
warped_filter[ROUND_POWER_OF_TWO(sy, WARPEDDIFF_PREC_BITS) +
WARPEDPIXEL_PREC_SHIFTS];
int32_t sum = 0;
for (m = 0; m < 8; ++m) {
sum += tmp[(k + m + 4) * 8 + (l + 4)] * coeffs[m];
@@ -14,6 +14,8 @@
#include "./av1_rtcd.h"
#include "av1/common/warped_motion.h"
const __m128i *const filter = (const __m128i *const)warped_filter;
/* SSE2 version of the rotzoom/affine warp filter */
void warp_affine_sse2(int32_t *mat, uint8_t *ref, int width, int height,
int stride, uint8_t *pred, int p_col, int p_row,
@@ -22,7 +24,7 @@ void warp_affine_sse2(int32_t *mat, uint8_t *ref, int width, int height,
int32_t alpha, int32_t beta, int32_t gamma,
int32_t delta) {
__m128i tmp[15];
int i, j, k, l;
int i, j, k;
/* Note: For this code to work, the left/right frame borders need to be
extended by at least 13 pixels each. By the time we get here, other
@@ -36,24 +38,29 @@
}
}*/
for (i = p_row; i < p_row + p_height; i += 8) {
for (j = p_col; j < p_col + p_width; j += 8) {
for (i = 0; i < p_height; i += 8) {
for (j = 0; j < p_width; j += 8) {
// (x, y) coordinates of the center of this block in the destination
// image
int32_t dst_x = p_col + j + 4;
int32_t dst_y = p_row + i + 4;
int32_t x4, y4, ix4, sx4, iy4, sy4;
if (subsampling_x)
x4 = ROUND_POWER_OF_TWO_SIGNED(
mat[2] * 2 * (j + 4) + mat[3] * 2 * (i + 4) + mat[0] +
mat[2] * 2 * dst_x + mat[3] * 2 * dst_y + mat[0] +
(mat[2] + mat[3] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
1);
else
x4 = mat[2] * (j + 4) + mat[3] * (i + 4) + mat[0];
x4 = mat[2] * dst_x + mat[3] * dst_y + mat[0];
if (subsampling_y)
y4 = ROUND_POWER_OF_TWO_SIGNED(
mat[4] * 2 * (j + 4) + mat[5] * 2 * (i + 4) + mat[1] +
mat[4] * 2 * dst_x + mat[5] * 2 * dst_y + mat[1] +
(mat[4] + mat[5] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
1);
else
y4 = mat[4] * (j + 4) + mat[5] * (i + 4) + mat[1];
y4 = mat[4] * dst_x + mat[5] * dst_y + mat[1];
ix4 = x4 >> WARPEDMODEL_PREC_BITS;
sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
@@ -61,214 +68,196 @@ void warp_affine_sse2(int32_t *mat, uint8_t *ref, int width, int height,
sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
// Horizontal filter
for (k = -7; k < 8; ++k) {
for (k = -7; k < AOMMIN(8, p_height - i); ++k) {
int iy = iy4 + k;
if (iy < 0)
iy = 0;
else if (iy > height - 1)
iy = height - 1;
// If the block is aligned such that, after clamping, every sample
// would be taken from the leftmost/rightmost column, then we can
// skip the expensive horizontal filter.
if (ix4 <= -7) {
// In this case, the rightmost pixel sampled is in column
// ix4 + 3 + 7 - 3 = ix4 + 7 <= 0, ie. the entire block
// will sample only from the leftmost column
// (once border extension is taken into account)
for (l = 0; l < 8; ++l) {
((int16_t *)tmp)[(k + 7) * 8 + l] =
ref[iy * stride] * (1 << WARPEDPIXEL_FILTER_BITS);
}
tmp[k + 7] =
_mm_set1_epi16(ref[iy * stride] * (1 << WARPEDPIXEL_FILTER_BITS));
} else if (ix4 >= width + 6) {
// In this case, the leftmost pixel sampled is in column
// ix4 - 3 + 0 - 3 = ix4 - 6 >= width, ie. the entire block
// will sample only from the rightmost column
// (once border extension is taken into account)
for (l = 0; l < 8; ++l) {
((int16_t *)tmp)[(k + 7) * 8 + l] =
ref[iy * stride + (width - 1)] * (1 << WARPEDPIXEL_FILTER_BITS);
}
tmp[k + 7] = _mm_set1_epi16(ref[iy * stride + (width - 1)] *
(1 << WARPEDPIXEL_FILTER_BITS));
} else {
// If we get here, then
// the leftmost pixel sampled is
// ix4 - 4 + 0 - 3 = ix4 - 7 >= -13
// and the rightmost pixel sampled is at most
// ix4 + 3 + 7 - 3 = ix4 + 7 <= width + 12
// So, assuming that border extension has been done, we
// don't need to explicitly clamp values.
int sx = sx4 + alpha * (-4) + beta * k;
// Load per-pixel coefficients
__m128i coeffs[8];
for (l = 0; l < 8; ++l) {
coeffs[l] =
((__m128i *)warped_filter)[ROUND_POWER_OF_TWO_SIGNED(
sx, WARPEDDIFF_PREC_BITS) +
WARPEDPIXEL_PREC_SHIFTS];
sx += alpha;
}
// Currently the coefficients are stored in the order
// 0 ... 7 for each pixel separately.
// Permute the coefficients into 8 registers:
// coeffs (0,1), (2,3), (4,5), (6,7) for
// i) the odd-index pixels, and ii) the even-index pixels
{
// 0 1 0 1 2 3 2 3 for 0, 2
__m128i tmp_0 = _mm_unpacklo_epi32(coeffs[0], coeffs[2]);
// 0 1 0 1 2 3 2 3 for 1, 3
__m128i tmp_1 = _mm_unpacklo_epi32(coeffs[1], coeffs[3]);
// 0 1 0 1 2 3 2 3 for 4, 6
__m128i tmp_2 = _mm_unpacklo_epi32(coeffs[4], coeffs[6]);
// 0 1 0 1 2 3 2 3 for 5, 7
__m128i tmp_3 = _mm_unpacklo_epi32(coeffs[5], coeffs[7]);
// 4 5 4 5 6 7 6 7 for 0, 2
__m128i tmp_4 = _mm_unpackhi_epi32(coeffs[0], coeffs[2]);
// 4 5 4 5 6 7 6 7 for 1, 3
__m128i tmp_5 = _mm_unpackhi_epi32(coeffs[1], coeffs[3]);
// 4 5 4 5 6 7 6 7 for 4, 6
__m128i tmp_6 = _mm_unpackhi_epi32(coeffs[4], coeffs[6]);
// 4 5 4 5 6 7 6 7 for 5, 7
__m128i tmp_7 = _mm_unpackhi_epi32(coeffs[5], coeffs[7]);
// 0 1 0 1 0 1 0 1 for 0, 2, 4, 6
__m128i coeff_a = _mm_unpacklo_epi64(tmp_0, tmp_2);
// 0 1 0 1 0 1 0 1 for 1, 3, 5, 7
__m128i coeff_b = _mm_unpacklo_epi64(tmp_1, tmp_3);
// 2 3 2 3 2 3 2 3 for 0, 2, 4, 6
__m128i coeff_c = _mm_unpackhi_epi64(tmp_0, tmp_2);
// 2 3 2 3 2 3 2 3 for 1, 3, 5, 7
__m128i coeff_d = _mm_unpackhi_epi64(tmp_1, tmp_3);
// 4 5 4 5 4 5 4 5 for 0, 2, 4, 6
__m128i coeff_e = _mm_unpacklo_epi64(tmp_4, tmp_6);
// 4 5 4 5 4 5 4 5 for 1, 3, 5, 7
__m128i coeff_f = _mm_unpacklo_epi64(tmp_5, tmp_7);
// 6 7 6 7 6 7 6 7 for 0, 2, 4, 6
__m128i coeff_g = _mm_unpackhi_epi64(tmp_4, tmp_6);
// 6 7 6 7 6 7 6 7 for 1, 3, 5, 7
__m128i coeff_h = _mm_unpackhi_epi64(tmp_5, tmp_7);
// Load source pixels and widen to 16 bits. We want the pixels
// ref[iy * stride + ix4 - 7] ... ref[iy * stride + ix4 + 7]
// inclusive.
__m128i zero = _mm_setzero_si128();
__m128i src =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
// Calculate filtered results
__m128i src_a = _mm_unpacklo_epi8(src, zero);
__m128i res_a = _mm_madd_epi16(src_a, coeff_a);
__m128i src_b = _mm_unpacklo_epi8(_mm_srli_si128(src, 1), zero);
__m128i res_b = _mm_madd_epi16(src_b, coeff_b);
__m128i src_c = _mm_unpacklo_epi8(_mm_srli_si128(src, 2), zero);
__m128i res_c = _mm_madd_epi16(src_c, coeff_c);
__m128i src_d = _mm_unpacklo_epi8(_mm_srli_si128(src, 3), zero);
__m128i res_d = _mm_madd_epi16(src_d, coeff_d);
__m128i src_e = _mm_unpacklo_epi8(_mm_srli_si128(src, 4), zero);
__m128i res_e = _mm_madd_epi16(src_e, coeff_e);
__m128i src_f = _mm_unpacklo_epi8(_mm_srli_si128(src, 5), zero);
__m128i res_f = _mm_madd_epi16(src_f, coeff_f);
__m128i src_g = _mm_unpacklo_epi8(_mm_srli_si128(src, 6), zero);
__m128i res_g = _mm_madd_epi16(src_g, coeff_g);
__m128i src_h = _mm_unpacklo_epi8(_mm_srli_si128(src, 7), zero);
__m128i res_h = _mm_madd_epi16(src_h, coeff_h);
__m128i res_even = _mm_add_epi32(_mm_add_epi32(res_a, res_e),
_mm_add_epi32(res_c, res_g));
__m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_b, res_f),
_mm_add_epi32(res_d, res_h));
// Combine results into one register.
// We store the columns in the order 0, 2, 4, 6, 1, 3, 5, 7
// as this order helps with the vertical filter.
tmp[k + 7] = _mm_packs_epi32(res_even, res_odd);
}
int sx = sx4 + alpha * (-4) + beta * k +
// Include rounding and offset here
(1 << (WARPEDDIFF_PREC_BITS - 1)) +
(WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
// Load source pixels
__m128i zero = _mm_setzero_si128();
__m128i src =
_mm_loadu_si128((__m128i *)(ref + iy * stride + ix4 - 7));
// Filter even-index pixels
__m128i tmp_0 = filter[(sx + 0 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_2 = filter[(sx + 2 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_4 = filter[(sx + 4 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_6 = filter[(sx + 6 * alpha) >> WARPEDDIFF_PREC_BITS];
// coeffs 0 1 0 1 2 3 2 3 for pixels 0, 2
__m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
// coeffs 0 1 0 1 2 3 2 3 for pixels 4, 6
__m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
// coeffs 4 5 4 5 6 7 6 7 for pixels 0, 2
__m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
// coeffs 4 5 4 5 6 7 6 7 for pixels 4, 6
__m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
// coeffs 0 1 0 1 0 1 0 1 for pixels 0, 2, 4, 6
__m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
// coeffs 2 3 2 3 2 3 2 3 for pixels 0, 2, 4, 6
__m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
// coeffs 4 5 4 5 4 5 4 5 for pixels 0, 2, 4, 6
__m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
// coeffs 6 7 6 7 6 7 6 7 for pixels 0, 2, 4, 6
__m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
// Calculate filtered results
__m128i src_0 = _mm_unpacklo_epi8(src, zero);
__m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
__m128i src_2 = _mm_unpacklo_epi8(_mm_srli_si128(src, 2), zero);
__m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
__m128i src_4 = _mm_unpacklo_epi8(_mm_srli_si128(src, 4), zero);
__m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
__m128i src_6 = _mm_unpacklo_epi8(_mm_srli_si128(src, 6), zero);
__m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
__m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_4),
_mm_add_epi32(res_2, res_6));
// Filter odd-index pixels
__m128i tmp_1 = filter[(sx + 1 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_3 = filter[(sx + 3 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_5 = filter[(sx + 5 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_7 = filter[(sx + 7 * alpha) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
__m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
__m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
__m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
__m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
__m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
__m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
__m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
__m128i src_1 = _mm_unpacklo_epi8(_mm_srli_si128(src, 1), zero);
__m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
__m128i src_3 = _mm_unpacklo_epi8(_mm_srli_si128(src, 3), zero);
__m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
__m128i src_5 = _mm_unpacklo_epi8(_mm_srli_si128(src, 5), zero);
__m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
__m128i src_7 = _mm_unpacklo_epi8(_mm_srli_si128(src, 7), zero);
__m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
__m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_5),
_mm_add_epi32(res_3, res_7));
// Combine results into one register.
// We store the columns in the order 0, 2, 4, 6, 1, 3, 5, 7
// as this order helps with the vertical filter.
tmp[k + 7] = _mm_packs_epi32(res_even, res_odd);
}
}
// Vertical filter
for (k = -4; k < AOMMIN(4, p_row + p_height - i - 4); ++k) {
int sy = sy4 + gamma * (-4) + delta * k;
// Load per-pixel coefficients
__m128i coeffs[8];
for (l = 0; l < 8; ++l) {
coeffs[l] = ((__m128i *)warped_filter)[ROUND_POWER_OF_TWO_SIGNED(
sy, WARPEDDIFF_PREC_BITS) +
WARPEDPIXEL_PREC_SHIFTS];
sy += gamma;
}
for (k = -4; k < AOMMIN(4, p_height - i - 4); ++k) {
int sy = sy4 + gamma * (-4) + delta * k +
(1 << (WARPEDDIFF_PREC_BITS - 1)) +
(WARPEDPIXEL_PREC_SHIFTS << WARPEDDIFF_PREC_BITS);
// Load from tmp and rearrange pairs of consecutive rows into the
// column order 0 0 2 2 4 4 6 6; 1 1 3 3 5 5 7 7
__m128i *src = tmp + (k + 4);
__m128i src_0 = _mm_unpacklo_epi16(src[0], src[1]);
__m128i src_2 = _mm_unpacklo_epi16(src[2], src[3]);
__m128i src_4 = _mm_unpacklo_epi16(src[4], src[5]);
__m128i src_6 = _mm_unpacklo_epi16(src[6], src[7]);
// Filter even-index pixels
__m128i tmp_0 = filter[(sy + 0 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_2 = filter[(sy + 2 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_4 = filter[(sy + 4 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_6 = filter[(sy + 6 * gamma) >> WARPEDDIFF_PREC_BITS];
{
// Rearrange coefficients in the same way as for the horizontal filter
__m128i tmp_0 = _mm_unpacklo_epi32(coeffs[0], coeffs[2]);
__m128i tmp_1 = _mm_unpacklo_epi32(coeffs[1], coeffs[3]);
__m128i tmp_2 = _mm_unpacklo_epi32(coeffs[4], coeffs[6]);
__m128i tmp_3 = _mm_unpacklo_epi32(coeffs[5], coeffs[7]);
__m128i tmp_4 = _mm_unpackhi_epi32(coeffs[0], coeffs[2]);
__m128i tmp_5 = _mm_unpackhi_epi32(coeffs[1], coeffs[3]);
__m128i tmp_6 = _mm_unpackhi_epi32(coeffs[4], coeffs[6]);
__m128i tmp_7 = _mm_unpackhi_epi32(coeffs[5], coeffs[7]);
__m128i coeff_a = _mm_unpacklo_epi64(tmp_0, tmp_2);
__m128i coeff_b = _mm_unpacklo_epi64(tmp_1, tmp_3);
__m128i coeff_c = _mm_unpackhi_epi64(tmp_0, tmp_2);
__m128i coeff_d = _mm_unpackhi_epi64(tmp_1, tmp_3);
__m128i coeff_e = _mm_unpacklo_epi64(tmp_4, tmp_6);
__m128i coeff_f = _mm_unpacklo_epi64(tmp_5, tmp_7);
__m128i coeff_g = _mm_unpackhi_epi64(tmp_4, tmp_6);
__m128i coeff_h = _mm_unpackhi_epi64(tmp_5, tmp_7);
// Load from tmp and rearrange pairs of consecutive rows into the
// column order 0 0 2 2 4 4 6 6; 1 1 3 3 5 5 7 7
__m128i *src = tmp + (k + 4);
__m128i src_a = _mm_unpacklo_epi16(src[0], src[1]);
__m128i src_b = _mm_unpackhi_epi16(src[0], src[1]);
__m128i src_c = _mm_unpacklo_epi16(src[2], src[3]);
__m128i src_d = _mm_unpackhi_epi16(src[2], src[3]);
__m128i src_e = _mm_unpacklo_epi16(src[4], src[5]);
__m128i src_f = _mm_unpackhi_epi16(src[4], src[5]);
__m128i src_g = _mm_unpacklo_epi16(src[6], src[7]);
__m128i src_h = _mm_unpackhi_epi16(src[6], src[7]);
// Calculate the filtered pixels
__m128i res_a = _mm_madd_epi16(src_a, coeff_a);
__m128i res_b = _mm_madd_epi16(src_b, coeff_b);
__m128i res_c = _mm_madd_epi16(src_c, coeff_c);
__m128i res_d = _mm_madd_epi16(src_d, coeff_d);
__m128i res_e = _mm_madd_epi16(src_e, coeff_e);
__m128i res_f = _mm_madd_epi16(src_f, coeff_f);
__m128i res_g = _mm_madd_epi16(src_g, coeff_g);
__m128i res_h = _mm_madd_epi16(src_h, coeff_h);
__m128i res_even = _mm_add_epi32(_mm_add_epi32(res_a, res_c),
_mm_add_epi32(res_e, res_g));
__m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_b, res_d),
_mm_add_epi32(res_f, res_h));
// Round and pack down to pixels
__m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
__m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
__m128i round_const =
_mm_set1_epi32((1 << (2 * WARPEDPIXEL_FILTER_BITS)) >> 1);
__m128i res_lo_round = _mm_srai_epi32(
_mm_add_epi32(res_lo, round_const), 2 * WARPEDPIXEL_FILTER_BITS);
__m128i res_hi_round = _mm_srai_epi32(
_mm_add_epi32(res_hi, round_const), 2 * WARPEDPIXEL_FILTER_BITS);
__m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
__m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
// Store, blending with 'pred' if needed
__m128i *p =
(__m128i *)&pred[(i - p_row + k + 4) * p_stride + (j - p_col)];
if (ref_frm) {
__m128i orig = _mm_loadl_epi64(p);
_mm_storel_epi64(p, _mm_avg_epu8(res_8bit, orig));
} else {
_mm_storel_epi64(p, res_8bit);
}
__m128i tmp_8 = _mm_unpacklo_epi32(tmp_0, tmp_2);
__m128i tmp_10 = _mm_unpacklo_epi32(tmp_4, tmp_6);
__m128i tmp_12 = _mm_unpackhi_epi32(tmp_0, tmp_2);
__m128i tmp_14 = _mm_unpackhi_epi32(tmp_4, tmp_6);
__m128i coeff_0 = _mm_unpacklo_epi64(tmp_8, tmp_10);
__m128i coeff_2 = _mm_unpackhi_epi64(tmp_8, tmp_10);
__m128i coeff_4 = _mm_unpacklo_epi64(tmp_12, tmp_14);
__m128i coeff_6 = _mm_unpackhi_epi64(tmp_12, tmp_14);
__m128i res_0 = _mm_madd_epi16(src_0, coeff_0);
__m128i res_2 = _mm_madd_epi16(src_2, coeff_2);
__m128i res_4 = _mm_madd_epi16(src_4, coeff_4);
__m128i res_6 = _mm_madd_epi16(src_6, coeff_6);
__m128i res_even = _mm_add_epi32(_mm_add_epi32(res_0, res_2),
_mm_add_epi32(res_4, res_6));
// Filter odd-index pixels
__m128i src_1 = _mm_unpackhi_epi16(src[0], src[1]);
__m128i src_3 = _mm_unpackhi_epi16(src[2], src[3]);
__m128i src_5 = _mm_unpackhi_epi16(src[4], src[5]);
__m128i src_7 = _mm_unpackhi_epi16(src[6], src[7]);
__m128i tmp_1 = filter[(sy + 1 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_3 = filter[(sy + 3 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_5 = filter[(sy + 5 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_7 = filter[(sy + 7 * gamma) >> WARPEDDIFF_PREC_BITS];
__m128i tmp_9 = _mm_unpacklo_epi32(tmp_1, tmp_3);
__m128i tmp_11 = _mm_unpacklo_epi32(tmp_5, tmp_7);
__m128i tmp_13 = _mm_unpackhi_epi32(tmp_1, tmp_3);
__m128i tmp_15 = _mm_unpackhi_epi32(tmp_5, tmp_7);
__m128i coeff_1 = _mm_unpacklo_epi64(tmp_9, tmp_11);
__m128i coeff_3 = _mm_unpackhi_epi64(tmp_9, tmp_11);
__m128i coeff_5 = _mm_unpacklo_epi64(tmp_13, tmp_15);
__m128i coeff_7 = _mm_unpackhi_epi64(tmp_13, tmp_15);
__m128i res_1 = _mm_madd_epi16(src_1, coeff_1);
__m128i res_3 = _mm_madd_epi16(src_3, coeff_3);
__m128i res_5 = _mm_madd_epi16(src_5, coeff_5);
__m128i res_7 = _mm_madd_epi16(src_7, coeff_7);
__m128i res_odd = _mm_add_epi32(_mm_add_epi32(res_1, res_3),
_mm_add_epi32(res_5, res_7));
// Rearrange pixels back into the order 0 ... 7
__m128i res_lo = _mm_unpacklo_epi32(res_even, res_odd);
__m128i res_hi = _mm_unpackhi_epi32(res_even, res_odd);
// Round and pack into 8 bits
__m128i round_const =
_mm_set1_epi32((1 << (2 * WARPEDPIXEL_FILTER_BITS)) >> 1);
__m128i res_lo_round = _mm_srai_epi32(
_mm_add_epi32(res_lo, round_const), 2 * WARPEDPIXEL_FILTER_BITS);
__m128i res_hi_round = _mm_srai_epi32(
_mm_add_epi32(res_hi, round_const), 2 * WARPEDPIXEL_FILTER_BITS);
__m128i res_16bit = _mm_packs_epi32(res_lo_round, res_hi_round);
__m128i res_8bit = _mm_packus_epi16(res_16bit, res_16bit);
// Store, blending with 'pred' if needed
__m128i *p = (__m128i *)&pred[(i + k + 4) * p_stride + j];
if (ref_frm) {
__m128i orig = _mm_loadl_epi64(p);
_mm_storel_epi64(p, _mm_avg_epu8(res_8bit, orig));
} else {
_mm_storel_epi64(p, res_8bit);
}
}
}
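One further detail worth noting from the SSE2 path: sx and sy are pre-biased with the rounding constant and the WARPEDPIXEL_PREC_SHIFTS table offset ("Include rounding and offset here"), so each per-pixel coefficient lookup becomes a single arithmetic shift rather than a rounding macro plus an added offset. A minimal sketch checking that equivalence; the constants are illustrative stand-ins for WARPEDDIFF_PREC_BITS and WARPEDPIXEL_PREC_SHIFTS, not values quoted from the headers:

#include <assert.h>
#include <stdio.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + ((1 << (n)) >> 1)) >> (n))

int main(void) {
  const int B = 10; /* stand-in for WARPEDDIFF_PREC_BITS */
  const int S = 64; /* stand-in for WARPEDPIXEL_PREC_SHIFTS */
  /* Assumes arithmetic right shift of negative ints, as the codec code does. */
  for (int sx = -(S << B); sx < (S << B); sx += 97) {
    int idx_c = ROUND_POWER_OF_TWO(sx, B) + S;            /* scalar reference path */
    int idx_simd = (sx + (1 << (B - 1)) + (S << B)) >> B; /* pre-biased lookup */
    assert(idx_c == idx_simd);
  }
  printf("pre-biased index matches the scalar path\n");
  return 0;
}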