/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "./av1_rtcd.h"
#include "./cdef_block.h"

/* partial A is a vector of eight 16-bit values of the form:
   [x8 x7 x6 x5 x4 x3 x2 x1] and partial B has the form:
   [0  y1 y2 y3 y4 y5 y6 y7].
   This function computes (x1^2+y1^2)*C1 + (x2^2+y2^2)*C2 + ... +
   (x7^2+y7^2)*C7 + (x8^2+0^2)*C8 where the C1..C8 constants are in const1
   and const2. */
static INLINE v128 fold_mul_and_sum(v128 partiala, v128 partialb, v128 const1,
                                    v128 const2) {
  v128 tmp;
  /* Reverse partial B. */
  partialb = v128_shuffle_8(
      partialb, v128_from_32(0x0f0e0100, 0x03020504, 0x07060908, 0x0b0a0d0c));
  /* Interleave the x and y values of identical indices and pair x8 with 0. */
  tmp = partiala;
  partiala = v128_ziplo_16(partialb, partiala);
  partialb = v128_ziphi_16(partialb, tmp);
  /* Square and add the corresponding x and y values. */
  partiala = v128_madd_s16(partiala, partiala);
  partialb = v128_madd_s16(partialb, partialb);
  /* Multiply by constant. */
  partiala = v128_mullo_s32(partiala, const1);
  partialb = v128_mullo_s32(partialb, const2);
  /* Sum all results. */
  partiala = v128_add_32(partiala, partialb);
  return partiala;
}
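
/* Illustrative scalar sketch (added for exposition, not part of the original
   implementation): the weighted sum of squares described above.  Note that
   fold_mul_and_sum() leaves this total spread across the four 32-bit lanes of
   its return value; hsum4() below collapses such lanes.  The names here are
   hypothetical. */
#if 0
static int64_t fold_mul_and_sum_scalar(const int16_t x[8], const int16_t y[7],
                                       const int32_t c[8]) {
  int i;
  int64_t sum = 0;
  for (i = 0; i < 8; i++) {
    const int32_t yi = (i < 7) ? y[i] : 0; /* x8 pairs with 0 */
    sum += (int64_t)c[i] * (x[i] * x[i] + yi * yi);
  }
  return sum;
}
#endif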

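/* Horizontal sums: lane i of the result is the sum of the four 32-bit lanes
   of x_i, obtained by transposing the 4x4 lane matrix and adding columns. */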
static INLINE v128 hsum4(v128 x0, v128 x1, v128 x2, v128 x3) {
  v128 t0, t1, t2, t3;
  t0 = v128_ziplo_32(x1, x0);
  t1 = v128_ziplo_32(x3, x2);
  t2 = v128_ziphi_32(x1, x0);
  t3 = v128_ziphi_32(x3, x2);
  x0 = v128_ziplo_64(t1, t0);
  x1 = v128_ziphi_64(t1, t0);
  x2 = v128_ziplo_64(t3, t2);
  x3 = v128_ziphi_64(t3, t2);
  return v128_add_32(v128_add_32(x0, x1), v128_add_32(x2, x3));
}

/* Computes cost for directions 0, 5, 6 and 7. We can call this function again
   to compute the remaining directions. */
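/* The partial sums below are directional: partial6 adds the rows with no
   shift (each lane accumulates a straight column), partial4 shifts row i by
   one 16-bit lane per row (each lane accumulates a 45-degree diagonal), and
   partial5/partial7 shift by one lane per pair of rows, giving the two
   half-slope directions in between.  The a/b halves of each partial sum hold
   the lanes that spill off either end of the 128-bit register. */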
static INLINE v128 compute_directions(v128 lines[8], int32_t tmp_cost1[4]) {
  v128 partial4a, partial4b, partial5a, partial5b, partial7a, partial7b;
  v128 partial6;
  v128 tmp;
  /* Partial sums for lines 0 and 1. */
  partial4a = v128_shl_n_byte(lines[0], 14);
  partial4b = v128_shr_n_byte(lines[0], 2);
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[1], 12));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[1], 4));
  tmp = v128_add_16(lines[0], lines[1]);
  partial5a = v128_shl_n_byte(tmp, 10);
  partial5b = v128_shr_n_byte(tmp, 6);
  partial7a = v128_shl_n_byte(tmp, 4);
  partial7b = v128_shr_n_byte(tmp, 12);
  partial6 = tmp;

  /* Partial sums for lines 2 and 3. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[2], 10));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[2], 6));
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[3], 8));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[3], 8));
  tmp = v128_add_16(lines[2], lines[3]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 8));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 8));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 6));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 10));
  partial6 = v128_add_16(partial6, tmp);

  /* Partial sums for lines 4 and 5. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[4], 6));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[4], 10));
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[5], 4));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[5], 12));
  tmp = v128_add_16(lines[4], lines[5]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 6));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 10));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 8));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 8));
  partial6 = v128_add_16(partial6, tmp);

  /* Partial sums for lines 6 and 7. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[6], 2));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[6], 14));
  partial4a = v128_add_16(partial4a, lines[7]);
  tmp = v128_add_16(lines[6], lines[7]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 4));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 12));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 10));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 6));
  partial6 = v128_add_16(partial6, tmp);

  /* Compute costs in terms of partial sums. */
  partial4a =
      fold_mul_and_sum(partial4a, partial4b, v128_from_32(210, 280, 420, 840),
                       v128_from_32(105, 120, 140, 168));
  partial7a =
      fold_mul_and_sum(partial7a, partial7b, v128_from_32(210, 420, 0, 0),
                       v128_from_32(105, 105, 105, 140));
  partial5a =
      fold_mul_and_sum(partial5a, partial5b, v128_from_32(210, 420, 0, 0),
                       v128_from_32(105, 105, 105, 140));
  partial6 = v128_madd_s16(partial6, partial6);
  partial6 = v128_mullo_s32(partial6, v128_dup_32(105));

  partial4a = hsum4(partial4a, partial5a, partial6, partial7a);
  v128_store_unaligned(tmp_cost1, partial4a);
  return partial4a;
}

/* transpose and reverse the order of the lines -- equivalent to a 90-degree
   counter-clockwise rotation of the pixels. */
static INLINE void array_reverse_transpose_8x8(v128 *in, v128 *res) {
  const v128 tr0_0 = v128_ziplo_16(in[1], in[0]);
  const v128 tr0_1 = v128_ziplo_16(in[3], in[2]);
  const v128 tr0_2 = v128_ziphi_16(in[1], in[0]);
  const v128 tr0_3 = v128_ziphi_16(in[3], in[2]);
  const v128 tr0_4 = v128_ziplo_16(in[5], in[4]);
  const v128 tr0_5 = v128_ziplo_16(in[7], in[6]);
  const v128 tr0_6 = v128_ziphi_16(in[5], in[4]);
  const v128 tr0_7 = v128_ziphi_16(in[7], in[6]);

  const v128 tr1_0 = v128_ziplo_32(tr0_1, tr0_0);
  const v128 tr1_1 = v128_ziplo_32(tr0_5, tr0_4);
  const v128 tr1_2 = v128_ziphi_32(tr0_1, tr0_0);
  const v128 tr1_3 = v128_ziphi_32(tr0_5, tr0_4);
  const v128 tr1_4 = v128_ziplo_32(tr0_3, tr0_2);
  const v128 tr1_5 = v128_ziplo_32(tr0_7, tr0_6);
  const v128 tr1_6 = v128_ziphi_32(tr0_3, tr0_2);
  const v128 tr1_7 = v128_ziphi_32(tr0_7, tr0_6);

  res[7] = v128_ziplo_64(tr1_1, tr1_0);
  res[6] = v128_ziphi_64(tr1_1, tr1_0);
  res[5] = v128_ziplo_64(tr1_3, tr1_2);
  res[4] = v128_ziphi_64(tr1_3, tr1_2);
  res[3] = v128_ziplo_64(tr1_5, tr1_4);
  res[2] = v128_ziphi_64(tr1_5, tr1_4);
  res[1] = v128_ziplo_64(tr1_7, tr1_6);
  res[0] = v128_ziphi_64(tr1_7, tr1_6);
}

int SIMD_FUNC(cdef_find_dir)(const uint16_t *img, int stride, int32_t *var,
                             int coeff_shift) {
  int i;
  int32_t cost[8];
  int32_t best_cost = 0;
  int best_dir = 0;
  v128 lines[8];
  for (i = 0; i < 8; i++) {
    lines[i] = v128_load_unaligned(&img[i * stride]);
    lines[i] =
        v128_sub_16(v128_shr_s16(lines[i], coeff_shift), v128_dup_16(128));
  }

#if defined(__SSE4_1__)
  /* Compute "mostly vertical" directions. */
  __m128i dir47 = compute_directions(lines, cost + 4);

  array_reverse_transpose_8x8(lines, lines);

  /* Compute "mostly horizontal" directions. */
  __m128i dir03 = compute_directions(lines, cost);

  __m128i max = _mm_max_epi32(dir03, dir47);
  max = _mm_max_epi32(max, _mm_shuffle_epi32(max, _MM_SHUFFLE(1, 0, 3, 2)));
  max = _mm_max_epi32(max, _mm_shuffle_epi32(max, _MM_SHUFFLE(2, 3, 0, 1)));
  best_cost = _mm_cvtsi128_si32(max);
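
  /* Find which lane holds the maximum: compare each cost vector against the
     broadcast maximum, pack the 32-bit masks down to bytes, and take the
     index of the lowest set bit of the resulting bitmask. */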
  __m128i t =
      _mm_packs_epi32(_mm_cmpeq_epi32(max, dir03), _mm_cmpeq_epi32(max, dir47));
  best_dir = _mm_movemask_epi8(_mm_packs_epi16(t, t));
  best_dir = get_msb(best_dir ^ (best_dir - 1));  // Count trailing zeros
#else
  /* Compute "mostly vertical" directions. */
  compute_directions(lines, cost + 4);

  array_reverse_transpose_8x8(lines, lines);

  /* Compute "mostly horizontal" directions. */
  compute_directions(lines, cost);

  for (i = 0; i < 8; i++) {
    if (cost[i] > best_cost) {
      best_cost = cost[i];
      best_dir = i;
    }
  }
#endif

  /* Difference between the optimal variance and the variance along the
     orthogonal direction. Again, the sum(x^2) terms cancel out. */
  *var = best_cost - cost[(best_dir + 4) & 7];
  /* We'd normally divide by 840, but dividing by 1024 is close enough
     for what we're going to do with this. */
  *var >>= 10;
  return best_dir;
}

// sign(a-b) * min(abs(a-b), max(0, threshold - (abs(a-b) >> adjdamp)))
SIMD_INLINE v128 constrain16(v128 a, v128 b, unsigned int threshold,
                             unsigned int adjdamp) {
  v128 diff = v128_sub_16(a, b);
  const v128 sign = v128_shr_n_s16(diff, 15);
  diff = v128_abs_s16(diff);
  const v128 s =
      v128_ssub_u16(v128_dup_16(threshold), v128_shr_u16(diff, adjdamp));
  return v128_xor(v128_add_16(sign, v128_min_s16(diff, s)), sign);
}
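
/* Illustrative scalar equivalent of constrain16() for a single sample (added
   for exposition; the function name is hypothetical and not part of the
   library). */
#if 0
static int16_t constrain16_scalar(int16_t a, int16_t b, unsigned int threshold,
                                  unsigned int adjdamp) {
  const int diff = a - b;
  const int adiff = diff < 0 ? -diff : diff;
  /* Taper the correction to zero as the difference grows past the threshold,
     with the falloff rate controlled by adjdamp. */
  int s = (int)threshold - (adiff >> adjdamp);
  int m;
  if (s < 0) s = 0;
  m = adiff < s ? adiff : s;
  return (int16_t)(diff < 0 ? -m : m);
}
#endif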

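/* Both direction filters below apply the same per-pixel update:
     y = x + ((8 + sum over taps of w_k * (constrain16(p_k, x, ...) +
                                           constrain16(q_k, x, ...))) >> 4)
   where p_k and q_k are the neighbours at +/- the k-th offset of the chosen
   direction, and the tap weights w_k are {4, 1} for the 4x4 filter and
   {3, 2, 1} for the 8x8 filter (the "sum += ..." steps below). */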
void SIMD_FUNC(cdef_direction_4x4)(uint16_t *y, int ystride, const uint16_t *in,
                                   int threshold, int dir, int damping) {
  int i;
  v128 p0, p1, sum, row, res;
  int o1 = cdef_directions[dir][0];
  int o2 = cdef_directions[dir][1];

  if (threshold) damping -= get_msb(threshold);
  for (i = 0; i < 4; i += 2) {
    sum = v128_zero();
    row = v128_from_v64(v64_load_aligned(&in[i * CDEF_BSTRIDE]),
                        v64_load_aligned(&in[(i + 1) * CDEF_BSTRIDE]));

    // p0 = constrain16(in[i*CDEF_BSTRIDE + offset], row, threshold, damping)
    p0 = v128_from_v64(v64_load_unaligned(&in[i * CDEF_BSTRIDE + o1]),
                       v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + o1]));
    p0 = constrain16(p0, row, threshold, damping);

    // p1 = constrain16(in[i*CDEF_BSTRIDE - offset], row, threshold, damping)
    p1 = v128_from_v64(v64_load_unaligned(&in[i * CDEF_BSTRIDE - o1]),
                       v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - o1]));
    p1 = constrain16(p1, row, threshold, damping);

    // sum += 4 * (p0 + p1)
    sum = v128_add_16(sum, v128_shl_n_16(v128_add_16(p0, p1), 2));

    // p0 = constrain16(in[i*CDEF_BSTRIDE + offset], row, threshold, damping)
    p0 = v128_from_v64(v64_load_unaligned(&in[i * CDEF_BSTRIDE + o2]),
                       v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + o2]));
    p0 = constrain16(p0, row, threshold, damping);

    // p1 = constrain16(in[i*CDEF_BSTRIDE - offset], row, threshold, damping)
    p1 = v128_from_v64(v64_load_unaligned(&in[i * CDEF_BSTRIDE - o2]),
                       v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - o2]));
    p1 = constrain16(p1, row, threshold, damping);

    // sum += 1 * (p0 + p1)
    sum = v128_add_16(sum, v128_add_16(p0, p1));

    // res = row + ((sum + 8) >> 4)
    res = v128_add_16(sum, v128_dup_16(8));
    res = v128_shr_n_s16(res, 4);
    res = v128_add_16(row, res);
    v64_store_aligned(&y[i * ystride], v128_high_v64(res));
    v64_store_aligned(&y[(i + 1) * ystride], v128_low_v64(res));
  }
}

void SIMD_FUNC(cdef_direction_8x8)(uint16_t *y, int ystride, const uint16_t *in,
                                   int threshold, int dir, int damping) {
  int i;
  v128 sum, p0, p1, row, res;
  int o1 = cdef_directions[dir][0];
  int o2 = cdef_directions[dir][1];
  int o3 = cdef_directions[dir][2];

  if (threshold) damping -= get_msb(threshold);
  for (i = 0; i < 8; i++) {
    sum = v128_zero();
    row = v128_load_aligned(&in[i * CDEF_BSTRIDE]);

    // p0 = constrain16(in[i*CDEF_BSTRIDE + offset], row, threshold, damping)
    p0 = v128_load_unaligned(&in[i * CDEF_BSTRIDE + o1]);
    p0 = constrain16(p0, row, threshold, damping);

    // p1 = constrain16(in[i*CDEF_BSTRIDE - offset], row, threshold, damping)
    p1 = v128_load_unaligned(&in[i * CDEF_BSTRIDE - o1]);
    p1 = constrain16(p1, row, threshold, damping);

    // sum += 3 * (p0 + p1)
    p0 = v128_add_16(p0, p1);
    p0 = v128_add_16(p0, v128_shl_n_16(p0, 1));
    sum = v128_add_16(sum, p0);

    // p0 = constrain16(in[i*CDEF_BSTRIDE + offset], row, threshold, damping)
    p0 = v128_load_unaligned(&in[i * CDEF_BSTRIDE + o2]);
    p0 = constrain16(p0, row, threshold, damping);

    // p1 = constrain16(in[i*CDEF_BSTRIDE - offset], row, threshold, damping)
    p1 = v128_load_unaligned(&in[i * CDEF_BSTRIDE - o2]);
    p1 = constrain16(p1, row, threshold, damping);

    // sum += 2 * (p0 + p1)
    p0 = v128_shl_n_16(v128_add_16(p0, p1), 1);
    sum = v128_add_16(sum, p0);

    // p0 = constrain16(in[i*CDEF_BSTRIDE + offset], row, threshold, damping)
    p0 = v128_load_unaligned(&in[i * CDEF_BSTRIDE + o3]);
    p0 = constrain16(p0, row, threshold, damping);

    // p1 = constrain16(in[i*CDEF_BSTRIDE - offset], row, threshold, damping)
    p1 = v128_load_unaligned(&in[i * CDEF_BSTRIDE - o3]);
    p1 = constrain16(p1, row, threshold, damping);

    // sum += (p0 + p1)
    p0 = v128_add_16(p0, p1);
    sum = v128_add_16(sum, p0);

    // res = row + ((sum + 8) >> 4)
    res = v128_add_16(sum, v128_dup_16(8));
    res = v128_shr_n_s16(res, 4);
    res = v128_add_16(row, res);
    v128_store_unaligned(&y[i * ystride], res);
  }
}

void SIMD_FUNC(copy_8x8_16bit_to_8bit)(uint8_t *dst, int dstride,
                                       const uint16_t *src, int sstride) {
  int i;
  for (i = 0; i < 8; i++) {
    v128 row = v128_load_unaligned(&src[i * sstride]);
    row = v128_pack_s16_u8(row, row);
    v64_store_unaligned(&dst[i * dstride], v128_low_v64(row));
  }
}

void SIMD_FUNC(copy_4x4_16bit_to_8bit)(uint8_t *dst, int dstride,
                                       const uint16_t *src, int sstride) {
  int i;
  for (i = 0; i < 4; i++) {
    v128 row = v128_load_unaligned(&src[i * sstride]);
    row = v128_pack_s16_u8(row, row);
    u32_store_unaligned(&dst[i * dstride], v128_low_u32(row));
  }
}

void SIMD_FUNC(copy_8x8_16bit_to_16bit)(uint16_t *dst, int dstride,
                                        const uint16_t *src, int sstride) {
  int i;
  for (i = 0; i < 8; i++) {
    v128 row = v128_load_unaligned(&src[i * sstride]);
    v128_store_unaligned(&dst[i * dstride], row);
  }
}

void SIMD_FUNC(copy_4x4_16bit_to_16bit)(uint16_t *dst, int dstride,
                                        const uint16_t *src, int sstride) {
  int i;
  for (i = 0; i < 4; i++) {
    v64 row = v64_load_unaligned(&src[i * sstride]);
    v64_store_unaligned(&dst[i * dstride], row);
  }
}

void SIMD_FUNC(copy_rect8_8bit_to_16bit)(uint16_t *dst, int dstride,
                                         const uint8_t *src, int sstride, int v,
                                         int h) {
  int i, j;
  for (i = 0; i < v; i++) {
    for (j = 0; j < (h & ~0x7); j += 8) {
      v64 row = v64_load_unaligned(&src[i * sstride + j]);
      v128_store_unaligned(&dst[i * dstride + j], v128_unpack_u8_s16(row));
    }
    for (; j < h; j++) {
      dst[i * dstride + j] = src[i * sstride + j];
    }
  }
}

void SIMD_FUNC(copy_rect8_16bit_to_16bit)(uint16_t *dst, int dstride,
                                          const uint16_t *src, int sstride,
                                          int v, int h) {
  int i, j;
  for (i = 0; i < v; i++) {
    for (j = 0; j < (h & ~0x7); j += 8) {
      v128 row = v128_load_unaligned(&src[i * sstride + j]);
      v128_store_unaligned(&dst[i * dstride + j], row);
    }
    for (; j < h; j++) {
      dst[i * dstride + j] = src[i * sstride + j];
    }
  }
}