/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "./aom_dsp_rtcd.h"
#include "aom_dsp/aom_convolve.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/aom_filter.h"
#include "av1/common/convolve.h"

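// 2D separable convolution: a horizontal 8-tap pass writes rounded 16-bit
// intermediates to an on-stack buffer, then a vertical 8-tap pass writes
// 32-bit results to conv_params->dst so they can be averaged with a second
// prediction when do_average is set (compound prediction).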
void av1_convolve_2d_avx2(const uint8_t *src, int src_stride, uint8_t *dst0,
                          int dst_stride0, int w, int h,
                          InterpFilterParams *filter_params_x,
                          InterpFilterParams *filter_params_y,
                          const int subpel_x_q4, const int subpel_y_q4,
                          ConvolveParams *conv_params) {
  CONV_BUF_TYPE *dst = conv_params->dst;
  int dst_stride = conv_params->dst_stride;
  const int bd = 8;
  (void)dst0;
  (void)dst_stride0;

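  // Intermediate buffer for the horizontal pass, with (taps - 1) extra rows
  // of context for the vertical pass that follows.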
  DECLARE_ALIGNED(32, int16_t,
                  im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]);
  int im_h = h + filter_params_y->taps - 1;
  int im_stride = MAX_SB_SIZE;
  int i, j;
  const int fo_vert = filter_params_y->taps / 2 - 1;
  const int fo_horiz = filter_params_x->taps / 2 - 1;
  const int do_average = conv_params->do_average;
  const uint8_t *const src_ptr = src - fo_vert * src_stride - fo_horiz;

  const __m256i zero = _mm256_setzero_si256();
  /* Horizontal filter */
  {
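    // Select the horizontal kernel for this 1/16-pel subpixel phase.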
    const int16_t *x_filter = av1_get_interp_filter_subpel_kernel(
        *filter_params_x, subpel_x_q4 & SUBPEL_MASK);

    const __m128i coeffs_x8 = _mm_loadu_si128((__m128i *)x_filter);
    // Broadcast the 128-bit filter to both lanes via cast + insert, since
    // not all compilers support _mm256_set_m128i() yet.
    const __m256i coeffs_x = _mm256_insertf128_si256(
        _mm256_castsi128_si256(coeffs_x8), coeffs_x8, 1);

    // coeffs 0 1 0 1 2 3 2 3
    const __m256i tmp_0 = _mm256_unpacklo_epi32(coeffs_x, coeffs_x);
    // coeffs 4 5 4 5 6 7 6 7
    const __m256i tmp_1 = _mm256_unpackhi_epi32(coeffs_x, coeffs_x);

    // coeffs 0 1 0 1 0 1 0 1
    const __m256i coeff_01 = _mm256_unpacklo_epi64(tmp_0, tmp_0);
    // coeffs 2 3 2 3 2 3 2 3
    const __m256i coeff_23 = _mm256_unpackhi_epi64(tmp_0, tmp_0);
    // coeffs 4 5 4 5 4 5 4 5
    const __m256i coeff_45 = _mm256_unpacklo_epi64(tmp_1, tmp_1);
    // coeffs 6 7 6 7 6 7 6 7
    const __m256i coeff_67 = _mm256_unpackhi_epi64(tmp_1, tmp_1);

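    // Rounding constant for the round_0 shift; the (1 << (bd + FILTER_BITS -
    // 1)) term offsets the signed filter output so the intermediate results
    // stay in range for 16-bit storage (the vertical pass subtracts the
    // propagated offset back out).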
    const __m256i round_const = _mm256_set1_epi32(
        ((1 << conv_params->round_0) >> 1) + (1 << (bd + FILTER_BITS - 1)));
    const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_0);

    for (i = 0; i < im_h; ++i) {
      for (j = 0; j < w; j += 16) {
        const __m256i data = _mm256_permute4x64_epi64(
            _mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]),
            _MM_SHUFFLE(2, 1, 1, 0));
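        // After the 64-bit permute, lane 0 holds source bytes 0..15 and
        // lane 1 holds bytes 8..23, i.e. each 128-bit lane carries the input
        // window for 8 consecutive output pixels.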

        const __m256i src_lo = _mm256_unpacklo_epi8(data, zero);
        const __m256i src_hi = _mm256_unpackhi_epi8(data, zero);
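        // src_lo/src_hi zero-extend each lane's bytes to 16 bits; the alignr
        // shifts below slide this window to reach taps at offsets 1..7.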

        // Filter even-index pixels
        const __m256i res_0 = _mm256_madd_epi16(src_lo, coeff_01);
        const __m256i src_2 = _mm256_alignr_epi8(src_hi, src_lo, 4);
        const __m256i res_2 = _mm256_madd_epi16(src_2, coeff_23);
        const __m256i src_4 = _mm256_alignr_epi8(src_hi, src_lo, 8);
        const __m256i res_4 = _mm256_madd_epi16(src_4, coeff_45);
        const __m256i src_6 = _mm256_alignr_epi8(src_hi, src_lo, 12);
        const __m256i res_6 = _mm256_madd_epi16(src_6, coeff_67);

        __m256i res_even = _mm256_add_epi32(_mm256_add_epi32(res_0, res_4),
                                            _mm256_add_epi32(res_2, res_6));
        res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const),
                                    round_shift);

        // Filter odd-index pixels
        const __m256i src_1 = _mm256_alignr_epi8(src_hi, src_lo, 2);
        const __m256i res_1 = _mm256_madd_epi16(src_1, coeff_01);
        const __m256i src_3 = _mm256_alignr_epi8(src_hi, src_lo, 6);
        const __m256i res_3 = _mm256_madd_epi16(src_3, coeff_23);
        const __m256i src_5 = _mm256_alignr_epi8(src_hi, src_lo, 10);
        const __m256i res_5 = _mm256_madd_epi16(src_5, coeff_45);
        const __m256i src_7 = _mm256_alignr_epi8(src_hi, src_lo, 14);
        const __m256i res_7 = _mm256_madd_epi16(src_7, coeff_67);

        __m256i res_odd = _mm256_add_epi32(_mm256_add_epi32(res_1, res_5),
                                           _mm256_add_epi32(res_3, res_7));
        res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const),
                                   round_shift);

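        // Saturating-pack the 32-bit sums back to 16 bits. Within each lane
        // the stored order is the four even pixels then the four odd pixels;
        // the vertical pass consumes this layout and restores natural order
        // at the end.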
        __m256i res = _mm256_packs_epi32(res_even, res_odd);
        _mm256_storeu_si256((__m256i *)&im_block[i * im_stride + j], res);
      }
    }
  }

  /* Vertical filter */
  {
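    // Select the vertical kernel and broadcast it to both 128-bit lanes,
    // mirroring the _mm256_set_m128i() workaround above.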
    const int16_t *y_filter = av1_get_interp_filter_subpel_kernel(
        *filter_params_y, subpel_y_q4 & SUBPEL_MASK);

    const __m128i coeffs_y8 = _mm_loadu_si128((__m128i *)y_filter);
    const __m256i coeffs_y = _mm256_insertf128_si256(
        _mm256_castsi128_si256(coeffs_y8), coeffs_y8, 1);

    // coeffs 0 1 0 1 2 3 2 3
    const __m256i tmp_0 = _mm256_unpacklo_epi32(coeffs_y, coeffs_y);
    // coeffs 4 5 4 5 6 7 6 7
    const __m256i tmp_1 = _mm256_unpackhi_epi32(coeffs_y, coeffs_y);

    // coeffs 0 1 0 1 0 1 0 1
    const __m256i coeff_01 = _mm256_unpacklo_epi64(tmp_0, tmp_0);
    // coeffs 2 3 2 3 2 3 2 3
    const __m256i coeff_23 = _mm256_unpackhi_epi64(tmp_0, tmp_0);
    // coeffs 4 5 4 5 4 5 4 5
    const __m256i coeff_45 = _mm256_unpacklo_epi64(tmp_1, tmp_1);
    // coeffs 6 7 6 7 6 7 6 7
    const __m256i coeff_67 = _mm256_unpackhi_epi64(tmp_1, tmp_1);

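    // The subtracted term cancels the offset injected by the horizontal
    // pass, scaled by the filter gain (1 << FILTER_BITS) and the round_0
    // shift.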
    const __m256i round_const = _mm256_set1_epi32(
        ((1 << conv_params->round_1) >> 1) -
        (1 << (bd + 2 * FILTER_BITS - conv_params->round_0 - 1)));
    const __m128i round_shift = _mm_cvtsi32_si128(conv_params->round_1);

    for (i = 0; i < h; ++i) {
      for (j = 0; j < w; j += 16) {
        // Filter even-index pixels
        const int16_t *data = &im_block[i * im_stride + j];
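        // Interleave adjacent rows so each madd accumulates one coefficient
        // pair (taps k and k+1) across two source rows at once.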
        const __m256i src_0 =
            _mm256_unpacklo_epi16(*(__m256i *)(data + 0 * im_stride),
                                  *(__m256i *)(data + 1 * im_stride));
        const __m256i src_2 =
            _mm256_unpacklo_epi16(*(__m256i *)(data + 2 * im_stride),
                                  *(__m256i *)(data + 3 * im_stride));
        const __m256i src_4 =
            _mm256_unpacklo_epi16(*(__m256i *)(data + 4 * im_stride),
                                  *(__m256i *)(data + 5 * im_stride));
        const __m256i src_6 =
            _mm256_unpacklo_epi16(*(__m256i *)(data + 6 * im_stride),
                                  *(__m256i *)(data + 7 * im_stride));

        const __m256i res_0 = _mm256_madd_epi16(src_0, coeff_01);
        const __m256i res_2 = _mm256_madd_epi16(src_2, coeff_23);
        const __m256i res_4 = _mm256_madd_epi16(src_4, coeff_45);
        const __m256i res_6 = _mm256_madd_epi16(src_6, coeff_67);

        const __m256i res_even = _mm256_add_epi32(
            _mm256_add_epi32(res_0, res_2), _mm256_add_epi32(res_4, res_6));

        // Filter odd-index pixels
        const __m256i src_1 =
            _mm256_unpackhi_epi16(*(__m256i *)(data + 0 * im_stride),
                                  *(__m256i *)(data + 1 * im_stride));
        const __m256i src_3 =
            _mm256_unpackhi_epi16(*(__m256i *)(data + 2 * im_stride),
                                  *(__m256i *)(data + 3 * im_stride));
        const __m256i src_5 =
            _mm256_unpackhi_epi16(*(__m256i *)(data + 4 * im_stride),
                                  *(__m256i *)(data + 5 * im_stride));
        const __m256i src_7 =
            _mm256_unpackhi_epi16(*(__m256i *)(data + 6 * im_stride),
                                  *(__m256i *)(data + 7 * im_stride));

        const __m256i res_1 = _mm256_madd_epi16(src_1, coeff_01);
        const __m256i res_3 = _mm256_madd_epi16(src_3, coeff_23);
        const __m256i res_5 = _mm256_madd_epi16(src_5, coeff_45);
        const __m256i res_7 = _mm256_madd_epi16(src_7, coeff_67);

        const __m256i res_odd = _mm256_add_epi32(
            _mm256_add_epi32(res_1, res_3), _mm256_add_epi32(res_5, res_7));

        // Rearrange pixels back into the order 0 ... 7
        const __m256i res_lo = _mm256_unpacklo_epi32(res_even, res_odd);
        const __m256i res_hi = _mm256_unpackhi_epi32(res_even, res_odd);

        const __m256i res_lo_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_lo, round_const), round_shift);
        const __m256i res_hi_round = _mm256_sra_epi32(
            _mm256_add_epi32(res_hi, round_const), round_shift);

        // Accumulate values into the destination buffer
        __m128i *const p = (__m128i *)&dst[i * dst_stride + j];
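        // When do_average is set, sum with the values already in the
        // CONV_BUF_TYPE buffer (second pass of a compound prediction).
        // If 8 or fewer columns remain, only the low 128-bit lanes
        // (results 0..7) are stored.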
        if (do_average) {
          _mm_storeu_si128(
              p + 0, _mm_add_epi32(_mm_loadu_si128(p + 0),
                                   _mm256_extractf128_si256(res_lo_round, 0)));
          _mm_storeu_si128(
              p + 1, _mm_add_epi32(_mm_loadu_si128(p + 1),
                                   _mm256_extractf128_si256(res_hi_round, 0)));
          if (w - j > 8) {
            _mm_storeu_si128(p + 2, _mm_add_epi32(_mm_loadu_si128(p + 2),
                                                  _mm256_extractf128_si256(
                                                      res_lo_round, 1)));
            _mm_storeu_si128(p + 3, _mm_add_epi32(_mm_loadu_si128(p + 3),
                                                  _mm256_extractf128_si256(
                                                      res_hi_round, 1)));
          }
        } else {
          _mm_storeu_si128(p + 0, _mm256_extractf128_si256(res_lo_round, 0));
          _mm_storeu_si128(p + 1, _mm256_extractf128_si256(res_hi_round, 0));
          if (w - j > 8) {
            _mm_storeu_si128(p + 2, _mm256_extractf128_si256(res_lo_round, 1));
            _mm_storeu_si128(p + 3, _mm256_extractf128_si256(res_hi_round, 1));
          }
        }
      }
    }
  }
}