/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "./aom_dsp_rtcd.h"
#include "aom_ports/mem.h"
#include "aom_ports/bitops.h"
#include "av1/common/clpf_simd_kernel.h"

// Process blocks of width 8, two lines at a time, 8 bit.
static void clpf_block8(uint8_t *dst, const uint16_t *src, int dstride,
                        int sstride, int sizey, unsigned int strength,
                        unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 2) {
    const v128 l1 = v128_load_aligned(src);
    const v128 l2 = v128_load_aligned(src + sstride);
    const v128 l3 = v128_load_aligned(src - sstride);
    const v128 l4 = v128_load_aligned(src + 2 * sstride);
    const v128 a = v128_pack_s16_u8(v128_load_aligned(src - 2 * sstride), l3);
    const v128 b = v128_pack_s16_u8(l3, l1);
    const v128 g = v128_pack_s16_u8(l2, l4);
    const v128 h = v128_pack_s16_u8(l4, v128_load_aligned(src + 3 * sstride));
    const v128 c = v128_pack_s16_u8(v128_load_unaligned(src - 2),
                                    v128_load_unaligned(src - 2 + sstride));
    const v128 d = v128_pack_s16_u8(v128_load_unaligned(src - 1),
                                    v128_load_unaligned(src - 1 + sstride));
    const v128 e = v128_pack_s16_u8(v128_load_unaligned(src + 1),
                                    v128_load_unaligned(src + 1 + sstride));
    const v128 f = v128_pack_s16_u8(v128_load_unaligned(src + 2),
                                    v128_load_unaligned(src + 2 + sstride));
    const v128 o = calc_delta(v128_pack_s16_u8(l1, l2), a, b, c, d, e, f, g, h,
                              strength, dmp);

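    // calc_delta() returns both filtered rows in one vector: row y in the
    // high 64 bits, row y + 1 in the low 64 bits.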
    v64_store_aligned(dst, v128_high_v64(o));
    v64_store_aligned(dst + dstride, v128_low_v64(o));
    src += sstride * 2;
    dst += dstride * 2;
  }
}

// Process blocks of width 4, four lines at a time, 8 bit.
static void clpf_block4(uint8_t *dst, const uint16_t *src, int dstride,
                        int sstride, int sizey, unsigned int strength,
                        unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 4) {
    const v64 l0 = v64_load_aligned(src - 2 * sstride);
    const v64 l1 = v64_load_aligned(src - sstride);
    const v64 l2 = v64_load_aligned(src);
    const v64 l3 = v64_load_aligned(src + sstride);
    const v64 l4 = v64_load_aligned(src + 2 * sstride);
    const v64 l5 = v64_load_aligned(src + 3 * sstride);
    const v64 l6 = v64_load_aligned(src + 4 * sstride);
    const v64 l7 = v64_load_aligned(src + 5 * sstride);
    const v128 a =
        v128_pack_s16_u8(v128_from_v64(l0, l1), v128_from_v64(l2, l3));
    const v128 b =
        v128_pack_s16_u8(v128_from_v64(l1, l2), v128_from_v64(l3, l4));
    const v128 g =
        v128_pack_s16_u8(v128_from_v64(l3, l4), v128_from_v64(l5, l6));
    const v128 h =
        v128_pack_s16_u8(v128_from_v64(l4, l5), v128_from_v64(l6, l7));
    const v128 c = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src - 2),
                      v64_load_unaligned(src + sstride - 2)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 2),
                      v64_load_unaligned(src + 3 * sstride - 2)));
    const v128 d = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src - 1),
                      v64_load_unaligned(src + sstride - 1)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 1),
                      v64_load_unaligned(src + 3 * sstride - 1)));
    const v128 e = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src + 1),
                      v64_load_unaligned(src + sstride + 1)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 1),
                      v64_load_unaligned(src + 3 * sstride + 1)));
    const v128 f = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src + 2),
                      v64_load_unaligned(src + sstride + 2)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 2),
                      v64_load_unaligned(src + 3 * sstride + 2)));

    const v128 o = calc_delta(
        v128_pack_s16_u8(v128_from_v64(l2, l3), v128_from_v64(l4, l5)), a, b, c,
        d, e, f, g, h, strength, dmp);

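    // The result holds all four filtered 4-pixel rows, first row in the top
    // four bytes, so shift each row down into the low 32 bits to store it.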
    u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
    u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
    u32_store_aligned(dst + 2 * dstride, v128_low_u32(v128_shr_n_byte(o, 4)));
    u32_store_aligned(dst + 3 * dstride, v128_low_u32(o));

    dst += 4 * dstride;
    src += 4 * sstride;
  }
}

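// Process blocks of width 8, horizontal filter, two lines at a time, 8 bit.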
static void clpf_hblock8(uint8_t *dst, const uint16_t *src, int dstride,
                         int sstride, int sizey, unsigned int strength,
                         unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 2) {
    const v128 l1 = v128_load_aligned(src);
    const v128 l2 = v128_load_aligned(src + sstride);
    const v128 a = v128_pack_s16_u8(v128_load_unaligned(src - 2),
                                    v128_load_unaligned(src - 2 + sstride));
    const v128 b = v128_pack_s16_u8(v128_load_unaligned(src - 1),
                                    v128_load_unaligned(src - 1 + sstride));
    const v128 c = v128_pack_s16_u8(v128_load_unaligned(src + 1),
                                    v128_load_unaligned(src + 1 + sstride));
    const v128 d = v128_pack_s16_u8(v128_load_unaligned(src + 2),
                                    v128_load_unaligned(src + 2 + sstride));
    const v128 o =
        calc_hdelta(v128_pack_s16_u8(l1, l2), a, b, c, d, strength, dmp);

    v64_store_aligned(dst, v128_high_v64(o));
    v64_store_aligned(dst + dstride, v128_low_v64(o));
    src += sstride * 2;
    dst += dstride * 2;
  }
}

// Process blocks of width 4, horizontal filter, four lines at a time, 8 bit.
static void clpf_hblock4(uint8_t *dst, const uint16_t *src, int dstride,
                         int sstride, int sizey, unsigned int strength,
                         unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 4) {
    const v64 l0 = v64_load_aligned(src);
    const v64 l1 = v64_load_aligned(src + sstride);
    const v64 l2 = v64_load_aligned(src + 2 * sstride);
    const v64 l3 = v64_load_aligned(src + 3 * sstride);
    const v128 a = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src - 2),
                      v64_load_unaligned(src + sstride - 2)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 2),
                      v64_load_unaligned(src + 3 * sstride - 2)));
    const v128 b = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src - 1),
                      v64_load_unaligned(src + sstride - 1)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 1),
                      v64_load_unaligned(src + 3 * sstride - 1)));
    const v128 c = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src + 1),
                      v64_load_unaligned(src + sstride + 1)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 1),
                      v64_load_unaligned(src + 3 * sstride + 1)));
    const v128 d = v128_pack_s16_u8(
        v128_from_v64(v64_load_unaligned(src + 2),
                      v64_load_unaligned(src + sstride + 2)),
        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 2),
                      v64_load_unaligned(src + 3 * sstride + 2)));

    const v128 o = calc_hdelta(
        v128_pack_s16_u8(v128_from_v64(l0, l1), v128_from_v64(l2, l3)), a, b, c,
        d, strength, dmp);

    u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
    u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
    u32_store_aligned(dst + 2 * dstride, v128_low_u32(v128_shr_n_byte(o, 4)));
    u32_store_aligned(dst + 3 * dstride, v128_low_u32(o));

    dst += 4 * dstride;
    src += 4 * sstride;
  }
}

void SIMD_FUNC(aom_clpf_block)(uint8_t *dst, const uint16_t *src, int dstride,
                               int sstride, int sizex, int sizey,
                               unsigned int strength, unsigned int dmp) {
  if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
    // Fallback to C for odd sizes:
    // * block widths not 4 or 8
    // * block heights not a multiple of 4 if the block width is 4
    aom_clpf_block_c(dst, src, dstride, sstride, sizex, sizey, strength, dmp);
  } else {
    (sizex == 4 ? clpf_block4 : clpf_block8)(dst, src, dstride, sstride, sizey,
                                             strength, dmp);
  }
}

void SIMD_FUNC(aom_clpf_hblock)(uint8_t *dst, const uint16_t *src, int dstride,
                                int sstride, int sizex, int sizey,
                                unsigned int strength, unsigned int dmp) {
  if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
    // Fallback to C for odd sizes:
    // * block widths not 4 or 8
    // * block heights not a multiple of 4 if the block width is 4
    aom_clpf_hblock_c(dst, src, dstride, sstride, sizex, sizey, strength, dmp);
  } else {
    (sizex == 4 ? clpf_hblock4 : clpf_hblock8)(dst, src, dstride, sstride,
                                               sizey, strength, dmp);
  }
}

// sign(a - b) * max(0, abs(a - b) - max(0, abs(a - b) -
// strength + (abs(a - b) >> (dmp - log2(strength)))))
SIMD_INLINE v128 constrain_hbd(v128 a, v128 b, unsigned int strength,
                               unsigned int dmp) {
  const v128 diff = v128_sub_16(v128_max_s16(a, b), v128_min_s16(a, b));
  const v128 sign = v128_cmpeq_16(v128_min_s16(a, b), a);  // -(a <= b)
  const v128 zero = v128_zero();
  const v128 s = v128_max_s16(
      zero, v128_sub_16(v128_dup_16(strength),
                        v128_shr_u16(diff, dmp - get_msb(strength))));
  return v128_sub_16(
      v128_xor(sign,
               v128_max_s16(
                   zero, v128_sub_16(
                             diff, v128_max_s16(zero, v128_sub_16(diff, s))))),
      sign);
}

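/* For reference, a scalar model of constrain_hbd() (an illustrative sketch
   only; constrain_scalar is a hypothetical name, not part of the library):

     int constrain_scalar(int a, int b, unsigned int strength,
                          unsigned int dmp) {
       const int diff = a - b;
       const int adiff = diff < 0 ? -diff : diff;
       // The correction is damped as the difference grows.
       int s = (int)strength - (adiff >> (dmp - get_msb(strength)));
       if (s < 0) s = 0;
       int c = adiff - (adiff - s > 0 ? adiff - s : 0);
       if (c < 0) c = 0;
       return diff < 0 ? -c : c;
     }
*/
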
// delta = 1/16 * constrain(a, x, s, dmp) + 3/16 * constrain(b, x, s, dmp) +
//         1/16 * constrain(c, x, s, dmp) + 3/16 * constrain(d, x, s, dmp) +
//         3/16 * constrain(e, x, s, dmp) + 1/16 * constrain(f, x, s, dmp) +
//         3/16 * constrain(g, x, s, dmp) + 1/16 * constrain(h, x, s, dmp)
SIMD_INLINE v128 calc_delta_hbd(v128 x, v128 a, v128 b, v128 c, v128 d, v128 e,
                                v128 f, v128 g, v128 h, unsigned int s,
                                unsigned int dmp) {
  const v128 bdeg = v128_add_16(
      v128_add_16(constrain_hbd(b, x, s, dmp), constrain_hbd(d, x, s, dmp)),
      v128_add_16(constrain_hbd(e, x, s, dmp), constrain_hbd(g, x, s, dmp)));
  const v128 delta = v128_add_16(
      v128_add_16(
          v128_add_16(constrain_hbd(a, x, s, dmp), constrain_hbd(c, x, s, dmp)),
          v128_add_16(constrain_hbd(f, x, s, dmp),
                      constrain_hbd(h, x, s, dmp))),
      v128_add_16(v128_add_16(bdeg, bdeg), bdeg));
  return v128_add_16(
      x,
      v128_shr_s16(
          v128_add_16(v128_dup_16(8),
                      v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
          4));
}

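/* The weights in calc_delta_hbd() sum to 16, so its final step rounds
   delta / 16 to the nearest integer with halves away from zero, e.g.:
     delta =  8:  (8 +  8 - 0) >> 4 = +1
     delta = -8:  (8 + -8 - 1) >> 4 = -1
   (v128_cmplt_s16() contributes the -1 for negative deltas.) */
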
static void calc_delta_hbd4(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
                            v128 f, v128 g, v128 h, uint16_t *dst,
                            unsigned int s, unsigned int dmp, int dstride) {
  o = calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, dmp);
  v64_store_aligned(dst, v128_high_v64(o));
  v64_store_aligned(dst + dstride, v128_low_v64(o));
}

static void calc_delta_hbd8(v128 o, v128 a, v128 b, v128 c, v128 d, v128 e,
                            v128 f, v128 g, v128 h, uint16_t *dst,
                            unsigned int s, unsigned int dmp) {
  v128_store_aligned(dst, calc_delta_hbd(o, a, b, c, d, e, f, g, h, s, dmp));
}

// delta = 1/16 * constrain(a, x, s, dmp) + 3/16 * constrain(b, x, s, dmp) +
//         3/16 * constrain(c, x, s, dmp) + 1/16 * constrain(d, x, s, dmp)
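// The four weights sum to 8 (vs 16 above), hence the +4 bias and shift by 3.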
SIMD_INLINE v128 calc_hdelta_hbd(v128 x, v128 a, v128 b, v128 c, v128 d,
                                 unsigned int s, unsigned int dmp) {
  const v128 bc =
      v128_add_16(constrain_hbd(b, x, s, dmp), constrain_hbd(c, x, s, dmp));
  const v128 delta = v128_add_16(
      v128_add_16(constrain_hbd(a, x, s, dmp), constrain_hbd(d, x, s, dmp)),
      v128_add_16(v128_add_16(bc, bc), bc));
  return v128_add_16(
      x,
      v128_shr_s16(
          v128_add_16(v128_dup_16(4),
                      v128_add_16(delta, v128_cmplt_s16(delta, v128_zero()))),
          3));
}

static void calc_hdelta_hbd4(v128 o, v128 a, v128 b, v128 c, v128 d,
                             uint16_t *dst, unsigned int s, unsigned int dmp,
                             int dstride) {
  o = calc_hdelta_hbd(o, a, b, c, d, s, dmp);
  v64_store_aligned(dst, v128_high_v64(o));
  v64_store_aligned(dst + dstride, v128_low_v64(o));
}

static void calc_hdelta_hbd8(v128 o, v128 a, v128 b, v128 c, v128 d,
                             uint16_t *dst, unsigned int s, unsigned int dmp) {
  v128_store_aligned(dst, calc_hdelta_hbd(o, a, b, c, d, s, dmp));
}

// Process blocks of width 4, two lines at a time.
SIMD_INLINE void clpf_block_hbd4(uint16_t *dst, const uint16_t *src,
                                 int dstride, int sstride, int sizey,
                                 unsigned int strength, unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 2) {
    const v64 l1 = v64_load_aligned(src);
    const v64 l2 = v64_load_aligned(src + sstride);
    const v64 l3 = v64_load_aligned(src - sstride);
    const v64 l4 = v64_load_aligned(src + 2 * sstride);
    const v128 a = v128_from_v64(v64_load_aligned(src - 2 * sstride), l3);
    const v128 b = v128_from_v64(l3, l1);
    const v128 g = v128_from_v64(l2, l4);
    const v128 h = v128_from_v64(l4, v64_load_aligned(src + 3 * sstride));
    const v128 c = v128_from_v64(v64_load_unaligned(src - 2),
                                 v64_load_unaligned(src - 2 + sstride));
    const v128 d = v128_from_v64(v64_load_unaligned(src - 1),
                                 v64_load_unaligned(src - 1 + sstride));
    const v128 e = v128_from_v64(v64_load_unaligned(src + 1),
                                 v64_load_unaligned(src + 1 + sstride));
    const v128 f = v128_from_v64(v64_load_unaligned(src + 2),
                                 v64_load_unaligned(src + 2 + sstride));

    calc_delta_hbd4(v128_from_v64(l1, l2), a, b, c, d, e, f, g, h, dst,
                    strength, dmp, dstride);
    src += sstride * 2;
    dst += dstride * 2;
  }
}

// The simplest case.  Start here if you need to understand the functions.
SIMD_INLINE void clpf_block_hbd(uint16_t *dst, const uint16_t *src, int dstride,
                                int sstride, int sizey, unsigned int strength,
                                unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y++) {
    const v128 o = v128_load_aligned(src);
    const v128 a = v128_load_aligned(src - 2 * sstride);
    const v128 b = v128_load_aligned(src - 1 * sstride);
    const v128 g = v128_load_aligned(src + sstride);
    const v128 h = v128_load_aligned(src + 2 * sstride);
    const v128 c = v128_load_unaligned(src - 2);
    const v128 d = v128_load_unaligned(src - 1);
    const v128 e = v128_load_unaligned(src + 1);
    const v128 f = v128_load_unaligned(src + 2);

    calc_delta_hbd8(o, a, b, c, d, e, f, g, h, dst, strength, dmp);
    src += sstride;
    dst += dstride;
  }
}
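
/* A scalar model of the loop above, one output pixel at a time (an
   illustrative sketch; constrain_scalar is the hypothetical helper sketched
   near constrain_hbd()):

     for (y = 0; y < sizey; y++, src += sstride, dst += dstride)
       for (x = 0; x < 8; x++) {
         const int X = src[x];
         const int delta =
             constrain_scalar(src[x - 2 * sstride], X, strength, dmp) +
             3 * constrain_scalar(src[x - sstride], X, strength, dmp) +
             constrain_scalar(src[x - 2], X, strength, dmp) +
             3 * constrain_scalar(src[x - 1], X, strength, dmp) +
             3 * constrain_scalar(src[x + 1], X, strength, dmp) +
             constrain_scalar(src[x + 2], X, strength, dmp) +
             3 * constrain_scalar(src[x + sstride], X, strength, dmp) +
             constrain_scalar(src[x + 2 * sstride], X, strength, dmp);
         dst[x] = X + ((8 + delta - (delta < 0)) >> 4);
       }
*/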

// Process blocks of width 4, horizontal filter, two lines at a time.
SIMD_INLINE void clpf_hblock_hbd4(uint16_t *dst, const uint16_t *src,
                                  int dstride, int sstride, int sizey,
                                  unsigned int strength, unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y += 2) {
    const v128 a = v128_from_v64(v64_load_unaligned(src - 2),
                                 v64_load_unaligned(src - 2 + sstride));
    const v128 b = v128_from_v64(v64_load_unaligned(src - 1),
                                 v64_load_unaligned(src - 1 + sstride));
    const v128 c = v128_from_v64(v64_load_unaligned(src + 1),
                                 v64_load_unaligned(src + 1 + sstride));
    const v128 d = v128_from_v64(v64_load_unaligned(src + 2),
                                 v64_load_unaligned(src + 2 + sstride));

    calc_hdelta_hbd4(v128_from_v64(v64_load_unaligned(src),
                                   v64_load_unaligned(src + sstride)),
                     a, b, c, d, dst, strength, dmp, dstride);
    src += sstride * 2;
    dst += dstride * 2;
  }
}

// Process blocks of width 8, horizontal filter, two lines at a time.
SIMD_INLINE void clpf_hblock_hbd(uint16_t *dst, const uint16_t *src,
                                 int dstride, int sstride, int sizey,
                                 unsigned int strength, unsigned int dmp) {
  int y;

  for (y = 0; y < sizey; y++) {
    const v128 o = v128_load_aligned(src);
    const v128 a = v128_load_unaligned(src - 2);
    const v128 b = v128_load_unaligned(src - 1);
    const v128 c = v128_load_unaligned(src + 1);
    const v128 d = v128_load_unaligned(src + 2);

    calc_hdelta_hbd8(o, a, b, c, d, dst, strength, dmp);
    src += sstride;
    dst += dstride;
  }
}

void SIMD_FUNC(aom_clpf_block_hbd)(uint16_t *dst, const uint16_t *src,
                                   int dstride, int sstride, int sizex,
                                   int sizey, unsigned int strength,
                                   unsigned int dmp) {
  if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
    // Fallback to C for odd sizes:
    // * block width not 4 or 8
    // * block heights not a multiple of 2 if the block width is 4
    aom_clpf_block_hbd_c(dst, src, dstride, sstride, sizex, sizey, strength,
                         dmp);
  } else {
    (sizex == 4 ? clpf_block_hbd4 : clpf_block_hbd)(dst, src, dstride, sstride,
                                                    sizey, strength, dmp);
  }
}

void SIMD_FUNC(aom_clpf_hblock_hbd)(uint16_t *dst, const uint16_t *src,
                                    int dstride, int sstride, int sizex,
                                    int sizey, unsigned int strength,
                                    unsigned int dmp) {
  if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
    // Fallback to C for odd sizes:
    // * block width not 4 or 8
    // * block heights not a multiple of 2 if the block width is 4
    aom_clpf_hblock_hbd_c(dst, src, dstride, sstride, sizex, sizey, strength,
                          dmp);
  } else {
    (sizex == 4 ? clpf_hblock_hbd4 : clpf_hblock_hbd)(
        dst, src, dstride, sstride, sizey, strength, dmp);
  }
}