/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <math.h>

#include "./aom_config.h"
#include "./aom_dsp_rtcd.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"

#include "av1/common/av1_loopfilter.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/reconinter.h"
#include "av1/common/seg_common.h"

#if CONFIG_LOOPFILTER_LEVEL
// Maps (plane, edge direction) to the segment-level loop filter feature.
// Direction index 0 selects vertical edges, 1 horizontal edges; the chroma
// planes use a single feature for both directions.
static const SEG_LVL_FEATURES seg_lvl_lf_lut[MAX_MB_PLANE][2] = {
  { SEG_LVL_ALT_LF_Y_V, SEG_LVL_ALT_LF_Y_H },
  { SEG_LVL_ALT_LF_U, SEG_LVL_ALT_LF_U },
  { SEG_LVL_ALT_LF_V, SEG_LVL_ALT_LF_V }
};

#if CONFIG_EXT_DELTA_Q
// Maps (plane, edge direction) to the delta-LF index used when
// cm->delta_lf_multi is enabled (see get_filter_level).
static const int delta_lf_id_lut[MAX_MB_PLANE][2] = {
  { 0, 1 }, { 2, 2 }, { 3, 3 }
};
#endif  // CONFIG_EXT_DELTA_Q
#endif  // CONFIG_LOOPFILTER_LEVEL

// The 5-tap chroma deblocking path is only available when the 13-tap
// deblocking experiment is enabled.
#if CONFIG_DEBLOCK_13TAP
#define PARALLEL_DEBLOCKING_5_TAP_CHROMA 1
#else
#define PARALLEL_DEBLOCKING_5_TAP_CHROMA 0
#endif

#if PARALLEL_DEBLOCKING_5_TAP_CHROMA
// C-only prototypes for the high bit-depth 6-sample chroma filters
// (declared here rather than via a generated RTCD header).
extern void aom_highbd_lpf_horizontal_6_c(uint16_t *s, int p,
                                          const uint8_t *blimit,
                                          const uint8_t *limit,
                                          const uint8_t *thresh, int bd);

extern void aom_highbd_lpf_vertical_6_c(uint16_t *s, int pitch,
                                        const uint8_t *blimit,
                                        const uint8_t *limit,
                                        const uint8_t *thresh, int bd);
#endif
55

// 64 bit masks for left transform size. Each 1 represents a position where
// we should apply a loop filter across the left border of an 8x8 block
// boundary.
//
// In the case of TX_16X16 (in low order byte first) we end up with
// a mask that looks like this:
//
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//
// A loopfilter should be applied to every other 8x8 horizontally.
static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffffULL,  // TX_4X4
  0xffffffffffffffffULL,  // TX_8x8
  0x5555555555555555ULL,  // TX_16x16
  0x1111111111111111ULL,  // TX_32x32
#if CONFIG_TX64X64
  0x0101010101010101ULL,  // TX_64x64
#endif                    // CONFIG_TX64X64
};

// 64 bit masks for above transform size. Each 1 represents a position where
// we should apply a loop filter across the top border of an 8x8 block
// boundary.
//
// In the case of TX_32x32 (in low order byte first) we end up with
// a mask that looks like this:
//
//    11111111
//    00000000
//    00000000
//    00000000
//    11111111
//    00000000
//    00000000
//    00000000
//
// A loopfilter should be applied to every other 4 the row vertically.
static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffffULL,  // TX_4X4
  0xffffffffffffffffULL,  // TX_8x8
  0x00ff00ff00ff00ffULL,  // TX_16x16
  0x000000ff000000ffULL,  // TX_32x32
#if CONFIG_TX64X64
  0x00000000000000ffULL,  // TX_64x64
#endif                    // CONFIG_TX64X64
};

// 64 bit masks for prediction sizes (left). Each 1 represents the left
// border of an 8x8 block. These are aligned to the right most appropriate
// bit, and then shifted into place.
//
// In the case of TX_16x32 (low order byte first) we end up with
// a mask that looks like this:
//
//  10000000
//  10000000
//  10000000
//  10000000
//  00000000
//  00000000
//  00000000
//  00000000
static const uint64_t left_prediction_mask[BLOCK_SIZES_ALL] = {
  0x0000000000000001ULL,  // BLOCK_4X4,
  0x0000000000000001ULL,  // BLOCK_4X8,
  0x0000000000000001ULL,  // BLOCK_8X4,
  0x0000000000000001ULL,  // BLOCK_8X8,
  0x0000000000000101ULL,  // BLOCK_8X16,
  0x0000000000000001ULL,  // BLOCK_16X8,
  0x0000000000000101ULL,  // BLOCK_16X16,
  0x0000000001010101ULL,  // BLOCK_16X32,
  0x0000000000000101ULL,  // BLOCK_32X16,
  0x0000000001010101ULL,  // BLOCK_32X32,
  0x0101010101010101ULL,  // BLOCK_32X64,
  0x0000000001010101ULL,  // BLOCK_64X32,
  0x0101010101010101ULL,  // BLOCK_64X64,
  0x0000000000000101ULL,  // BLOCK_4X16,
  0x0000000000000001ULL,  // BLOCK_16X4,
  0x0000000001010101ULL,  // BLOCK_8X32,
  0x0000000000000001ULL,  // BLOCK_32X8,
  0x0101010101010101ULL,  // BLOCK_16X64,
  0x0000000000000101ULL,  // BLOCK_64X16
};

// 64 bit mask to shift and set for each prediction size. Each 1 represents
// the top border of an 8x8 block along the top row of the prediction block.
static const uint64_t above_prediction_mask[BLOCK_SIZES_ALL] = {
  0x0000000000000001ULL,  // BLOCK_4X4
  0x0000000000000001ULL,  // BLOCK_4X8
  0x0000000000000001ULL,  // BLOCK_8X4
  0x0000000000000001ULL,  // BLOCK_8X8
  0x0000000000000001ULL,  // BLOCK_8X16,
  0x0000000000000003ULL,  // BLOCK_16X8
  0x0000000000000003ULL,  // BLOCK_16X16
  0x0000000000000003ULL,  // BLOCK_16X32,
  0x000000000000000fULL,  // BLOCK_32X16,
  0x000000000000000fULL,  // BLOCK_32X32,
  0x000000000000000fULL,  // BLOCK_32X64,
  0x00000000000000ffULL,  // BLOCK_64X32,
  0x00000000000000ffULL,  // BLOCK_64X64,
  0x0000000000000001ULL,  // BLOCK_4X16,
  0x0000000000000003ULL,  // BLOCK_16X4,
  0x0000000000000001ULL,  // BLOCK_8X32,
  0x000000000000000fULL,  // BLOCK_32X8,
  0x0000000000000003ULL,  // BLOCK_16X64,
  0x00000000000000ffULL,  // BLOCK_64X16
};
// 64 bit mask to shift and set for each prediction size. A bit is set for
// each 8x8 block that would be in the top left most block of the given block
// size in the 64x64 block.
static const uint64_t size_mask[BLOCK_SIZES_ALL] = {
  0x0000000000000001ULL,  // BLOCK_4X4
  0x0000000000000001ULL,  // BLOCK_4X8
  0x0000000000000001ULL,  // BLOCK_8X4
  0x0000000000000001ULL,  // BLOCK_8X8
  0x0000000000000101ULL,  // BLOCK_8X16,
  0x0000000000000003ULL,  // BLOCK_16X8
  0x0000000000000303ULL,  // BLOCK_16X16
  0x0000000003030303ULL,  // BLOCK_16X32,
  0x0000000000000f0fULL,  // BLOCK_32X16,
  0x000000000f0f0f0fULL,  // BLOCK_32X32,
  0x0f0f0f0f0f0f0f0fULL,  // BLOCK_32X64,
  0x00000000ffffffffULL,  // BLOCK_64X32,
  0xffffffffffffffffULL,  // BLOCK_64X64,
  0x0000000000000101ULL,  // BLOCK_4X16,
  0x0000000000000003ULL,  // BLOCK_16X4,
  0x0000000001010101ULL,  // BLOCK_8X32,
  0x000000000000000fULL,  // BLOCK_32X8,
  0x0303030303030303ULL,  // BLOCK_16X64,
  0x000000000000ffffULL,  // BLOCK_64X16
};

// These are used for masking the left and above 32x32 borders.
static const uint64_t left_border = 0x1111111111111111ULL;
static const uint64_t above_border = 0x000000ff000000ffULL;

// 16 bit masks for uv transform sizes (chroma uses a 4x4 grid of 8x8
// blocks per 64x64 luma superblock, hence 16 bits).
static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x5555,  // TX_16x16
  0x1111,  // TX_32x32
#if CONFIG_TX64X64
  0x0101,  // TX_64x64, never used
#endif     // CONFIG_TX64X64
};

// 16 bit above masks for uv transform sizes.
static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x0f0f,  // TX_16x16
  0x000f,  // TX_32x32
#if CONFIG_TX64X64
  0x0003,  // TX_64x64, never used
#endif     // CONFIG_TX64X64
};

// 16 bit left mask to shift and set for each uv prediction size.
static const uint16_t left_prediction_mask_uv[BLOCK_SIZES_ALL] = {
  0x0001,  // BLOCK_4X4,
  0x0001,  // BLOCK_4X8,
  0x0001,  // BLOCK_8X4,
  0x0001,  // BLOCK_8X8,
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8,
  0x0001,  // BLOCK_16X16,
  0x0011,  // BLOCK_16X32,
  0x0001,  // BLOCK_32X16,
  0x0011,  // BLOCK_32X32,
  0x1111,  // BLOCK_32X64
  0x0011,  // BLOCK_64X32,
  0x1111,  // BLOCK_64X64,
  0x0001,  // BLOCK_4X16,
  0x0001,  // BLOCK_16X4,
  0x0011,  // BLOCK_8X32,
  0x0001,  // BLOCK_32X8,
  0x1111,  // BLOCK_16X64,
  0x0001,  // BLOCK_64X16,
};

// 16 bit above mask to shift and set for each uv prediction size.
static const uint16_t above_prediction_mask_uv[BLOCK_SIZES_ALL] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0001,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0003,  // BLOCK_32X32,
  0x0003,  // BLOCK_32X64,
  0x000f,  // BLOCK_64X32,
  0x000f,  // BLOCK_64X64,
  0x0001,  // BLOCK_4X16,
  0x0001,  // BLOCK_16X4,
  0x0001,  // BLOCK_8X32,
  0x0003,  // BLOCK_32X8,
  0x0001,  // BLOCK_16X64,
  0x000f,  // BLOCK_64X16
};

// 16 bit mask to shift and set for each uv prediction size. A bit is set
// for each chroma 8x8 block covered by the prediction block.
static const uint16_t size_mask_uv[BLOCK_SIZES_ALL] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0011,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0033,  // BLOCK_32X32,
  0x3333,  // BLOCK_32X64,
  0x00ff,  // BLOCK_64X32,
  0xffff,  // BLOCK_64X64,
  0x0001,  // BLOCK_4X16,
  0x0001,  // BLOCK_16X4,
  0x0011,  // BLOCK_8X32,
  0x0003,  // BLOCK_32X8,
  0x1111,  // BLOCK_16X64,
  0x000f,  // BLOCK_64X16
};
// These are used for masking the left and above 32x32 borders (uv planes).
static const uint16_t left_border_uv = 0x1111;
static const uint16_t above_border_uv = 0x000f;

// Maps prediction modes to the mode-delta index used by the loop filter:
// 0 for intra and global-motion modes, 1 for the remaining inter modes.
static const int mode_lf_lut[] = {
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // INTRA_MODES
  1, 1, 0, 1,                             // INTER_MODES (GLOBALMV == 0)
  1, 1, 1, 1, 1, 1, 0, 1  // INTER_COMPOUND_MODES (GLOBAL_GLOBALMV == 0)
};

// Fill the per-level "lim" and "mblim" threshold vectors implied by the
// given sharpness setting.  A higher sharpness shrinks the inside limit,
// which narrows the set of edges the filters are allowed to modify.
static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
  for (int lvl = 0; lvl <= MAX_LOOP_FILTER; ++lvl) {
    // Sharpness 1..4 halves the inside limit; 5..7 quarters it.
    int inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));

    // Nonzero sharpness also caps the limit at (9 - sharpness).
    if (sharpness_lvl > 0 && inside_limit > 9 - sharpness_lvl)
      inside_limit = 9 - sharpness_lvl;

    if (inside_limit < 1) inside_limit = 1;

    memset(lfi->lfthr[lvl].lim, inside_limit, SIMD_WIDTH);
    memset(lfi->lfthr[lvl].mblim, 2 * (lvl + 2) + inside_limit, SIMD_WIDTH);
  }
}
#if CONFIG_EXT_DELTA_Q
// Returns the loop filter strength (0..MAX_LOOP_FILTER) for one block.
// When per-superblock delta-LF signaling is active the base level comes
// from the block's delta plus the frame level; segment overrides and
// ref-frame/mode deltas are then applied.  Otherwise the level is read
// from the precomputed lfi_n->lvl table.
static uint8_t get_filter_level(const AV1_COMMON *cm,
                                const loop_filter_info_n *lfi_n,
#if CONFIG_LOOPFILTER_LEVEL
                                const int dir_idx, int plane,
#endif
                                const MB_MODE_INFO *mbmi) {
  const int segment_id = mbmi->segment_id;
  if (cm->delta_lf_present_flag) {
#if CONFIG_LOOPFILTER_LEVEL
    int delta_lf;
    if (cm->delta_lf_multi) {
      // One delta per (plane, direction) group.
      const int delta_lf_idx = delta_lf_id_lut[plane][dir_idx];
      delta_lf = mbmi->curr_delta_lf[delta_lf_idx];
    } else {
      delta_lf = mbmi->current_delta_lf_from_base;
    }
    int lvl_seg =
        clamp(delta_lf + cm->lf.filter_level[dir_idx], 0, MAX_LOOP_FILTER);
#else
    int lvl_seg = clamp(mbmi->current_delta_lf_from_base + cm->lf.filter_level,
                        0, MAX_LOOP_FILTER);
#endif
#if CONFIG_LOOPFILTER_LEVEL
    assert(plane >= 0 && plane <= 2);
    const int seg_lf_feature_id = seg_lvl_lf_lut[plane][dir_idx];
    if (segfeature_active(&cm->seg, segment_id, seg_lf_feature_id)) {
      const int data = get_segdata(&cm->seg, segment_id, seg_lf_feature_id);
      lvl_seg = clamp(lvl_seg + data, 0, MAX_LOOP_FILTER);
    }
#else
    if (segfeature_active(&cm->seg, segment_id, SEG_LVL_ALT_LF)) {
      const int data = get_segdata(&cm->seg, segment_id, SEG_LVL_ALT_LF);
      lvl_seg = clamp(lvl_seg + data, 0, MAX_LOOP_FILTER);
    }
#endif  // CONFIG_LOOPFILTER_LEVEL

    if (cm->lf.mode_ref_delta_enabled) {
      // Deltas are scaled by 2 once the level reaches 32.
      const int scale = 1 << (lvl_seg >> 5);
      lvl_seg += cm->lf.ref_deltas[mbmi->ref_frame[0]] * scale;
      if (mbmi->ref_frame[0] > INTRA_FRAME)
        lvl_seg += cm->lf.mode_deltas[mode_lf_lut[mbmi->mode]] * scale;
      lvl_seg = clamp(lvl_seg, 0, MAX_LOOP_FILTER);
    }
    return lvl_seg;
  } else {
#if CONFIG_LOOPFILTER_LEVEL
    return lfi_n
        ->lvl[segment_id][dir_idx][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
#else
    return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
#endif
  }
}
#else
// Without CONFIG_EXT_DELTA_Q the filter level is a pure table lookup.
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                const MB_MODE_INFO *mbmi) {
  const int segment_id = mbmi->segment_id;
  return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
}
#endif
377

378
void av1_loop_filter_init(AV1_COMMON *cm) {
379
  assert(MB_MODE_COUNT == NELEMENTS(mode_lf_lut));
380 381 382 383 384 385 386 387 388 389 390 391 392
  loop_filter_info_n *lfi = &cm->lf_info;
  struct loopfilter *lf = &cm->lf;
  int lvl;

  // init limits for given sharpness
  update_sharpness(lfi, lf->sharpness_level);
  lf->last_sharpness_level = lf->sharpness_level;

  // init hev threshold const vectors
  for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++)
    memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}

// Precomputes the per-(segment, direction, ref-frame, mode) filter level
// table in cm->lf_info for the given base levels.  default_filt_lvl is the
// base level for vertical edges (dir == 0) and default_filt_lvl_r for
// horizontal edges (dir == 1).
void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl,
                                int default_filt_lvl_r
#if CONFIG_LOOPFILTER_LEVEL
                                ,
                                int plane
#endif
                                ) {
  int seg_id;
  // 'scale' below is the multiplier for lf deltas: 1 when the filter level
  // is in [0, 31], 2 when it is in [32, 63].
  loop_filter_info_n *const lfi = &cm->lf_info;
  struct loopfilter *const lf = &cm->lf;
  const struct segmentation *const seg = &cm->seg;

  // update limits if sharpness has changed
  if (lf->last_sharpness_level != lf->sharpness_level) {
    update_sharpness(lfi, lf->sharpness_level);
    lf->last_sharpness_level = lf->sharpness_level;
  }

  for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
    for (int dir = 0; dir < 2; ++dir) {
      int lvl_seg = (dir == 0) ? default_filt_lvl : default_filt_lvl_r;
#if CONFIG_LOOPFILTER_LEVEL
      assert(plane >= 0 && plane <= 2);
      const int seg_lf_feature_id = seg_lvl_lf_lut[plane][dir];
      if (segfeature_active(seg, seg_id, seg_lf_feature_id)) {
        const int data = get_segdata(&cm->seg, seg_id, seg_lf_feature_id);
        lvl_seg = clamp(lvl_seg + data, 0, MAX_LOOP_FILTER);
      }
#else
      if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
        const int data = get_segdata(seg, seg_id, SEG_LVL_ALT_LF);
        lvl_seg = clamp(lvl_seg + data, 0, MAX_LOOP_FILTER);
      }
#endif  // CONFIG_LOOPFILTER_LEVEL

      if (!lf->mode_ref_delta_enabled) {
// we could get rid of this if we assume that deltas are set to
// zero when not in use; encoder always uses deltas
#if CONFIG_LOOPFILTER_LEVEL
        memset(lfi->lvl[seg_id][dir], lvl_seg, sizeof(lfi->lvl[seg_id][dir]));
#else
        memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
#endif  // CONFIG_LOOPFILTER_LEVEL
      } else {
        int ref, mode;
#if CONFIG_LOOPFILTER_LEVEL
        const int scale = 1 << (lvl_seg >> 5);
        // Intra blocks have a single level (no mode deltas).
        const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
        lfi->lvl[seg_id][dir][INTRA_FRAME][0] =
            clamp(intra_lvl, 0, MAX_LOOP_FILTER);

        for (ref = LAST_FRAME; ref < TOTAL_REFS_PER_FRAME; ++ref) {
          for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
            const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale +
                                  lf->mode_deltas[mode] * scale;
            lfi->lvl[seg_id][dir][ref][mode] =
                clamp(inter_lvl, 0, MAX_LOOP_FILTER);
          }
        }
#else
        // NOTE(review): this branch scales by the *default* level rather
        // than the segment-adjusted one — matches the historical behavior.
        const int scale = 1 << (default_filt_lvl >> 5);
        const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
        lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER);

        for (ref = LAST_FRAME; ref < TOTAL_REFS_PER_FRAME; ++ref) {
          for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
            const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale +
                                  lf->mode_deltas[mode] * scale;
            lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER);
          }
        }
#endif
      }
    }
  }
}

// Vertically filters two adjacent rows of 8x8 edges in one pass.  Each bit
// of the *_l masks selects one column edge; the low mask_shift bits cover
// the first row and the next mask_shift bits the second row.  lfl holds one
// filter level per column for the first row, with the second row's levels
// lfl_forward entries ahead.
static void filter_selectively_vert_row2(int subsampling_factor, uint8_t *s,
                                         int pitch, unsigned int mask_16x16_l,
                                         unsigned int mask_8x8_l,
                                         unsigned int mask_4x4_l,
                                         unsigned int mask_4x4_int_l,
                                         const loop_filter_info_n *lfi_n,
                                         const uint8_t *lfl) {
  // Subsampled (chroma) planes carry 4 edges per row, luma carries 8.
  const int mask_shift = subsampling_factor ? 4 : 8;
  const int mask_cutoff = subsampling_factor ? 0xf : 0xff;
  const int lfl_forward = subsampling_factor ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    if (mask & 1) {
      if ((mask_16x16_0 | mask_16x16_1) & 1) {
        // Use the dual variant when both rows filter this column edge.
        if ((mask_16x16_0 & mask_16x16_1) & 1) {
          aom_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr);
        } else if (mask_16x16_0 & 1) {
          aom_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
        } else {
          aom_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                              lfi1->hev_thr);
        }
      }

      if ((mask_8x8_0 | mask_8x8_1) & 1) {
        if ((mask_8x8_0 & mask_8x8_1) & 1) {
          aom_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_8x8_0 & 1) {
          aom_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
        } else {
          aom_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr);
        }
      }

      if ((mask_4x4_0 | mask_4x4_1) & 1) {
        if ((mask_4x4_0 & mask_4x4_1) & 1) {
          aom_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_4x4_0 & 1) {
          aom_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr);
        } else {
          aom_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr);
        }
      }

      if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
        // Internal (within-block) 4x4 edges sit 4 pixels into the block.
        if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
          aom_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                  lfi1->hev_thr);
        } else if (mask_4x4_int_0 & 1) {
          aom_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                             lfi0->hev_thr);
        } else {
          aom_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
                             lfi1->hev_thr);
        }
      }
    }

    // Advance one 8-pixel column and step every mask to the next edge.
    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}

// High bit-depth counterpart of filter_selectively_vert_row2: identical
// mask-walking logic, but operating on uint16_t pixels and dispatching to
// the aom_highbd_lpf_* kernels with the bit depth 'bd'.
static void highbd_filter_selectively_vert_row2(
    int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
    unsigned int mask_8x8_l, unsigned int mask_4x4_l,
    unsigned int mask_4x4_int_l, const loop_filter_info_n *lfi_n,
    const uint8_t *lfl, int bd) {
  // Subsampled (chroma) planes carry 4 edges per row, luma carries 8.
  const int mask_shift = subsampling_factor ? 4 : 8;
  const int mask_cutoff = subsampling_factor ? 0xf : 0xff;
  const int lfl_forward = subsampling_factor ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    if (mask & 1) {
      if ((mask_16x16_0 | mask_16x16_1) & 1) {
        // Use the dual variant when both rows filter this column edge.
        if ((mask_16x16_0 & mask_16x16_1) & 1) {
          aom_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                          lfi0->hev_thr, bd);
        } else if (mask_16x16_0 & 1) {
          aom_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                     lfi0->hev_thr, bd);
        } else {
          aom_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                     lfi1->lim, lfi1->hev_thr, bd);
        }
      }

      if ((mask_8x8_0 | mask_8x8_1) & 1) {
        if ((mask_8x8_0 & mask_8x8_1) & 1) {
          aom_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_8x8_0 & 1) {
          aom_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, bd);
        } else {
          aom_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, bd);
        }
      }

      if ((mask_4x4_0 | mask_4x4_1) & 1) {
        if ((mask_4x4_0 & mask_4x4_1) & 1) {
          aom_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_4x4_0 & 1) {
          aom_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, bd);
        } else {
          aom_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, bd);
        }
      }

      if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
        // Internal (within-block) 4x4 edges sit 4 pixels into the block.
        if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
          aom_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                         lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                         lfi1->hev_thr, bd);
        } else if (mask_4x4_int_0 & 1) {
          aom_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                    lfi0->hev_thr, bd);
        } else {
          aom_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
                                    lfi1->lim, lfi1->hev_thr, bd);
        }
      }
    }

    // Advance one 8-pixel column and step every mask to the next edge.
    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}

// Horizontally filters one row of 8x8 edges.  Each bit of the masks selects
// one 8-pixel-wide edge; when two neighboring edges share the same filter
// size a dual kernel handles both at once (count = 2).  mask_4x4_int marks
// internal 4x4 edges located 4 rows below the block's top edge.
static void filter_selectively_horiz(
    uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
    unsigned int mask_4x4, unsigned int mask_4x4_int,
    const loop_filter_info_n *lfi_n, const uint8_t *lfl) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
       mask >>= count) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          aom_lpf_horizontal_16_dual(s, pitch, lfi->mblim, lfi->lim,
                                     lfi->hev_thr);
          count = 2;
        } else {
          aom_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          aom_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);

          if ((mask_4x4_int & 3) == 3) {
            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr);
            else if (mask_4x4_int & 2)
              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr);
          }
          count = 2;
        } else {
          aom_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);

          if (mask_4x4_int & 1)
            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr);
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          aom_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);

          if ((mask_4x4_int & 3) == 3) {
            aom_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr);
            else if (mask_4x4_int & 2)
              aom_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr);
          }
          count = 2;
        } else {
          aom_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);

          if (mask_4x4_int & 1)
            aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr);
        }
      } else if (mask_4x4_int & 1) {
        // Only the internal edge needs filtering at this position.
        aom_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                             lfi->hev_thr);
      }
    }
    // Advance past the edge(s) just handled (1 or 2 columns).
    s += 8 * count;
    lfl += count;
    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}

// High-bitdepth counterpart of filter_selectively_horiz(): walks a row of
// 8-pixel-wide columns and applies the widest horizontal loop filter whose
// mask bit is set for each column (16 > 8 > 4). When two adjacent columns
// share the same filter size ((mask & 3) == 3), the *_dual variant filters
// both at once and 'count' is bumped to 2 so the loop advances two columns.
// mask_4x4_int selects the extra 4-tap filter on the internal 4x4 transform
// edge, 4 rows below the block edge (s + 4 * pitch).
static void highbd_filter_selectively_horiz(
    uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
    unsigned int mask_4x4, unsigned int mask_4x4_int,
    const loop_filter_info_n *lfi_n, const uint8_t *lfl, int bd) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
       mask >>= count) {
    // Per-column thresholds come from the filter-level byte array 'lfl'.
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          aom_highbd_lpf_horizontal_16_dual(s, pitch, lfi->mblim, lfi->lim,
                                            lfi->hev_thr, bd);
          count = 2;
        } else {
          aom_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, bd);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          aom_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim, lfin->lim,
                                           lfin->hev_thr, bd);

          if ((mask_4x4_int & 3) == 3) {
            aom_highbd_lpf_horizontal_4_dual(
                s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                lfin->mblim, lfin->lim, lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, bd);
            } else if (mask_4x4_int & 2) {
              // Internal edge belongs to the second (next) column only.
              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, bd);
            }
          }
          count = 2;
        } else {
          aom_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, bd);

          if (mask_4x4_int & 1) {
            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, bd);
          }
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          aom_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim, lfin->lim,
                                           lfin->hev_thr, bd);
          if ((mask_4x4_int & 3) == 3) {
            aom_highbd_lpf_horizontal_4_dual(
                s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
                lfin->mblim, lfin->lim, lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, bd);
            } else if (mask_4x4_int & 2) {
              aom_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, bd);
            }
          }
          count = 2;
        } else {
          aom_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, bd);

          if (mask_4x4_int & 1) {
            aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, bd);
          }
        }
      } else if (mask_4x4_int & 1) {
        // Only the internal 4x4 edge needs filtering in this column.
        aom_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, bd);
      }
    }
    // Advance 'count' 8-pixel columns and shift all masks accordingly.
    s += 8 * count;
    lfl += count;
    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}

// This function ors into the current lfm structure, where to do loop
// filters for the specific mi we are looking at. It uses information
// including the block_size_type (32x16, 32x32, etc.), the transform size,
// whether there were any coefficients encoded, and the loop filter strength
// block we are currently looking at. Shift is used to position the
// 1's we produce.
// TODO(JBB) Need another function for different resolution color..
static void build_masks(AV1_COMMON *const cm,
                        const loop_filter_info_n *const lfi_n,
                        const MODE_INFO *mi, const int shift_y,
                        const int shift_uv, LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  // TODO(debargha): Check if masks can be setup correctly when
  // rectangular transfroms are used with the EXT_TX expt.
  const TX_SIZE tx_size_y = txsize_sqr_map[mbmi->tx_size];
  const TX_SIZE tx_size_y_left = txsize_horz_map[mbmi->tx_size];
  const TX_SIZE tx_size_y_above = txsize_vert_map[mbmi->tx_size];
  const TX_SIZE tx_size_uv_actual = av1_get_uv_tx_size(mbmi, 1, 1);
  const TX_SIZE tx_size_uv = txsize_sqr_map[tx_size_uv_actual];
  const TX_SIZE tx_size_uv_left = txsize_horz_map[tx_size_uv_actual];
  const TX_SIZE tx_size_uv_above = txsize_vert_map[tx_size_uv_actual];
// The filter-level lookup signature differs per experiment configuration.
#if CONFIG_EXT_DELTA_Q
#if CONFIG_LOOPFILTER_LEVEL
  const int filter_level = get_filter_level(cm, lfi_n, 0, 0, mbmi);
#else
  const int filter_level = get_filter_level(cm, lfi_n, mbmi);
#endif
#else
  const int filter_level = get_filter_level(lfi_n, mbmi);
  (void)cm;
#endif
  uint64_t *const left_y = &lfm->left_y[tx_size_y_left];
  uint64_t *const above_y = &lfm->above_y[tx_size_y_above];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  uint16_t *const left_uv = &lfm->left_uv[tx_size_uv_left];
  uint16_t *const above_uv = &lfm->above_uv[tx_size_uv_above];
  // Internal 4x4 uv bits are accumulated in left_int_4x4_uv;
  // above_int_4x4_uv is derived from it later in av1_setup_mask().
  uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv;
  int i;

  // If filter level is 0 we don't loop filter.
  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    const int row = (shift_y >> MAX_MIB_SIZE_LOG2);
    const int col = shift_y - (row << MAX_MIB_SIZE_LOG2);

    // Record the filter level for every 8x8 unit this block covers.
    for (i = 0; i < h; i++) memset(&lfm->lfl_y[row + i][col], filter_level, w);
  }

  // These set 1 in the current block size for the block size edges.
  // For instance if the block size is 32x16, we'll set:
  //    above =   1111
  //              0000
  //    and
  //    left  =   1000
  //          =   1000
  // NOTE : In this example the low bit is left most ( 1000 ) is stored as
  //        1,  not 8...
  //
  // U and V set things on a 16 bit scale.
  //
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
  *left_y |= left_prediction_mask[block_size] << shift_y;
  *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;

  // If the block has no coefficients and is not intra we skip applying
  // the loop filter on block edges.
  if (mbmi->skip && is_inter_block(mbmi)) return;

  // Here we are adding a mask for the transform size. The transform
  // size mask is set to be correct for a 64x64 prediction block size. We
  // mask to match the size of the block we are working on and then shift it
  // into place..
  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y_above])
              << shift_y;
  *above_uv |=
      (size_mask_uv[block_size] & above_64x64_txform_mask_uv[tx_size_uv_above])
      << shift_uv;

  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y_left])
             << shift_y;
  *left_uv |=
      (size_mask_uv[block_size] & left_64x64_txform_mask_uv[tx_size_uv_left])
      << shift_uv;

  // Here we are trying to determine what to do with the internal 4x4 block
  // boundaries.  These differ from the 4x4 boundaries on the outside edge of
  // an 8x8 in that the internal ones can be skipped and don't depend on
  // the prediction block size.
  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;

  if (tx_size_uv == TX_4X4)
    *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
}

// This function does the same thing as the one above with the exception that
// it only affects the y masks. It exists because for blocks < 16x16 in size,
// we only update u and v masks on the first block.
static void build_y_mask(AV1_COMMON *const cm,
                         const loop_filter_info_n *const lfi_n,
                         const MODE_INFO *mi, const int shift_y,
                         LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const TX_SIZE tx_size_y = txsize_sqr_map[mbmi->tx_size];
  const TX_SIZE tx_size_y_left = txsize_horz_map[mbmi->tx_size];
  const TX_SIZE tx_size_y_above = txsize_vert_map[mbmi->tx_size];
  const BLOCK_SIZE block_size = mbmi->sb_type;
// The filter-level lookup signature differs per experiment configuration.
#if CONFIG_EXT_DELTA_Q
#if CONFIG_LOOPFILTER_LEVEL
  const int filter_level = get_filter_level(cm, lfi_n, 0, 0, mbmi);
#else
  const int filter_level = get_filter_level(cm, lfi_n, mbmi);
#endif
#else
  const int filter_level = get_filter_level(lfi_n, mbmi);
  (void)cm;
#endif
  uint64_t *const left_y = &lfm->left_y[tx_size_y_left];
  uint64_t *const above_y = &lfm->above_y[tx_size_y_above];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  int i;

  // A zero filter level means this block is not filtered at all.
  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    const int row = (shift_y >> MAX_MIB_SIZE_LOG2);
    const int col = shift_y - (row << MAX_MIB_SIZE_LOG2);

    // Record the filter level for every 8x8 unit this block covers.
    for (i = 0; i < h; i++) memset(&lfm->lfl_y[row + i][col], filter_level, w);
  }

  // Mark the prediction block edges.
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *left_y |= left_prediction_mask[block_size] << shift_y;

  // Skipped inter blocks carry no coefficients, so interior transform
  // edges need no filtering.
  if (mbmi->skip && is_inter_block(mbmi)) return;

  // Add the transform-size edges, clipped to this block's extent.
  *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y_above])
              << shift_y;

  *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y_left])
             << shift_y;

  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y;
}

#if CONFIG_LOOPFILTERING_ACROSS_TILES || CONFIG_LOOPFILTERING_ACROSS_TILES_EXT
// This function updates the bit masks for the entire 64x64 region represented
// by mi_row, mi_col. In case one of the edges is a tile boundary, loop
// filtering for that edge is disabled. This function only checks the tile
// boundary info for the top left corner mi to determine the boundary
// information for the top and left edge of the whole super block.
static void update_tile_boundary_filter_mask(AV1_COMMON *const cm,
                                             const int mi_row, const int mi_col,
                                             LOOP_FILTER_MASK *lfm) {
  int i;
  const BOUNDARY_TYPE *const bi =
      cm->boundary_info + mi_row * cm->mi_stride + mi_col;

  if (*bi & TILE_LEFT_BOUNDARY) {
    // Clear the left-most column bit of every mask row
    // (bit 0 of each 8-bit y row / each 4-bit uv row).
    for (i = 0; i <= TX_32X32; i++) {
      lfm->left_y[i] &= 0xfefefefefefefefeULL;
      lfm->left_uv[i] &= 0xeeee;
    }
  }

  if (*bi & TILE_ABOVE_BOUNDARY) {
    // Clear the entire top row of the above masks
    // (low 8 bits of y, low 4 bits of uv).
    for (i = 0; i <= TX_32X32; i++) {
      lfm->above_y[i] &= 0xffffffffffffff00ULL;
      lfm->above_uv[i] &= 0xfff0;
    }
  }
}
#endif  // CONFIG_LOOPFILTERING_ACROSS_TILES || CONFIG_LOOPFILTERING_ACROSS_TILES_EXT

// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
void av1_setup_mask(AV1_COMMON *const cm, int mi_row, int mi_col,
                    MODE_INFO **mi, int mode_info_stride,
                    LOOP_FILTER_MASK *lfm) {
#if CONFIG_EXT_PARTITION
  assert(0 && "Not yet updated");
#endif  // CONFIG_EXT_PARTITION
  int idx_32, idx_16, idx_8;
  const loop_filter_info_n *const lfi_n = &cm->lf_info;
  MODE_INFO **mip = mi;
  MODE_INFO **mip2 = mi;

  // These are offsets to the next mi in the 64x64 block. It is what gets
  // added to the mi ptr as we go through each loop. It helps us to avoid
  // setting up special row and column counters for each index. The last step
  // brings us out back to the starting position.
  const int offset_32[] = { 4, (mode_info_stride << 2) - 4, 4,
                            -(mode_info_stride << 2) - 4 };
  const int offset_16[] = { 2, (mode_info_stride << 1) - 2, 2,
                            -(mode_info_stride << 1) - 2 };
  const int offset[] = { 1, mode_info_stride - 1, 1, -mode_info_stride - 1 };

  // Following variables represent shifts to position the current block
  // mask over the appropriate block. A shift of 36 to the left will move
  // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left
  // 4 rows to the appropriate spot.
  const int shift_32_y[] = { 0, 4, 32, 36 };
  const int shift_16_y[] = { 0, 2, 16, 18 };
  const int shift_8_y[] = { 0, 1, 8, 9 };
  const int shift_32_uv[] = { 0, 2, 8, 10 };
  const int shift_16_uv[] = { 0, 1, 4, 5 };
  int i;
  // Clamp to the image edge so partially visible superblocks are handled.
  const int max_rows = AOMMIN(cm->mi_rows - mi_row, MAX_MIB_SIZE);
  const int max_cols = AOMMIN(cm->mi_cols - mi_col, MAX_MIB_SIZE);

  av1_zero(*lfm);
  assert(mip[0] != NULL);

  // TODO(jimbankoski): Try moving most of the following code into decode
  // loop and storing lfm in the mbmi structure so that we don't have to go
  // through the recursive loop structure multiple times.
  switch (mip[0]->mbmi.sb_type) {
    case BLOCK_64X64: build_masks(cm, lfi_n, mip[0], 0, 0, lfm); break;
    case BLOCK_64X32:
      build_masks(cm, lfi_n, mip[0], 0, 0, lfm);
      mip2 = mip + mode_info_stride * 4;
      if (4 >= max_rows) break;
      build_masks(cm, lfi_n, mip2[0], 32, 8, lfm);
      break;
    case BLOCK_32X64:
      build_masks(cm, lfi_n, mip[0], 0, 0, lfm);
      mip2 = mip + 4;
      if (4 >= max_cols) break;
      build_masks(cm, lfi_n, mip2[0], 4, 2, lfm);
      break;
    default:
      // Walk the four 32x32 quadrants, recursing down to 16x16 and 8x8
      // units as dictated by the partitioning found at each level.
      for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
        const int shift_y_32 = shift_32_y[idx_32];
        const int shift_uv_32 = shift_32_uv[idx_32];
        const int mi_32_col_offset = ((idx_32 & 1) << 2);
        const int mi_32_row_offset = ((idx_32 >> 1) << 2);
        if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
          continue;
        switch (mip[0]->mbmi.sb_type) {
          case BLOCK_32X32:
            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
            break;
          case BLOCK_32X16:
            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
            if (mi_32_row_offset + 2 >= max_rows) continue;
            mip2 = mip + mode_info_stride * 2;
            build_masks(cm, lfi_n, mip2[0], shift_y_32 + 16, shift_uv_32 + 4,
                        lfm);
            break;
          case BLOCK_16X32:
            build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
            if (mi_32_col_offset + 2 >= max_cols) continue;
            mip2 = mip + 2;
            build_masks(cm, lfi_n, mip2[0], shift_y_32 + 2, shift_uv_32 + 1,
                        lfm);
            break;
          default:
            for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
              const int shift_y_32_16 = shift_y_32 + shift_16_y[idx_16];
              const int shift_uv_32_16 = shift_uv_32 + shift_16_uv[idx_16];
              const int mi_16_col_offset =
                  mi_32_col_offset + ((idx_16 & 1) << 1);
              const int mi_16_row_offset =
                  mi_32_row_offset + ((idx_16 >> 1) << 1);

              if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
                continue;

              switch (mip[0]->mbmi.sb_type) {
                case BLOCK_16X16:
                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
                              lfm);
                  break;
                case BLOCK_16X8:
                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
                              lfm);
                  if (mi_16_row_offset + 1 >= max_rows) continue;
                  mip2 = mip + mode_info_stride;
                  build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 8, lfm);
                  break;
                case BLOCK_8X16:
                  build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
                              lfm);
                  if (mi_16_col_offset + 1 >= max_cols) continue;
                  mip2 = mip + 1;
                  build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 1, lfm);
                  break;
                default: {
                  // Blocks smaller than 16x16: full masks for the first 8x8,
                  // y-only masks for the remaining three (see build_y_mask).
                  const int shift_y_32_16_8_zero = shift_y_32_16 + shift_8_y[0];
                  build_masks(cm, lfi_n, mip[0], shift_y_32_16_8_zero,
                              shift_uv_32_16, lfm);
                  mip += offset[0];
                  for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
                    const int shift_y_32_16_8 =
                        shift_y_32_16 + shift_8_y[idx_8];
                    const int mi_8_col_offset =
                        mi_16_col_offset + ((idx_8 & 1));
                    const int mi_8_row_offset =
                        mi_16_row_offset + ((idx_8 >> 1));

                    if (mi_8_col_offset >= max_cols ||
                        mi_8_row_offset >= max_rows)
                      continue;
                    build_y_mask(cm, lfi_n, mip[0], shift_y_32_16_8, lfm);
                  }
                  break;
                }
              }
            }
            break;
        }
      }
      break;
  }
  // The largest loopfilter we have is 16x16 so we use the 16x16 mask
  // for 32x32 transforms also.
  lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32];
  lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32];
  lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32];
  lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32];

  // We do at least 8 tap filter on every 32x32 even if the transform size
  // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and
  // remove it from the 4x4.
  lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border;
  lfm->left_y[TX_4X4] &= ~left_border;
  lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border;
  lfm->above_y[TX_4X4] &= ~above_border;
  lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv;
  lfm->left_uv[TX_4X4] &= ~left_border_uv;
  lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv;
  lfm->above_uv[TX_4X4] &= ~above_border_uv;

  // We do some special edge handling.
  if (mi_row + MAX_MIB_SIZE > cm->mi_rows) {
    const uint64_t rows = cm->mi_rows - mi_row;

    // Each pixel inside the border gets a 1,
    const uint64_t mask_y = (((uint64_t)1 << (rows << MAX_MIB_SIZE_LOG2)) - 1);
    const uint16_t mask_uv =
        (((uint16_t)1 << (((rows + 1) >> 1) << (MAX_MIB_SIZE_LOG2 - 1))) - 1);

    // Remove values completely outside our border.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv;

    // We don't apply a wide loop filter on the last uv block row. If set
    // apply the shorter one instead.
    if (rows == 1) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16];
      lfm->above_uv[TX_16X16] = 0;
    }
    if (rows == 5) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00;
      lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00);
    }
  } else {
    // No bottom clipping: above internal-uv mask mirrors the left one.
    lfm->above_int_4x4_uv = lfm->left_int_4x4_uv;
  }

  if (mi_col + MAX_MIB_SIZE > cm->mi_cols) {
    const uint64_t columns = cm->mi_cols - mi_col;

    // Each pixel inside the border gets a 1, the multiply copies the border
    // to where we need it.
    const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL;
    const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;

    // Internal edges are not applied on the last column of the image so
    // we mask 1 more for the internal edges
    const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111;

    // Remove the bits outside the image edge.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->left_int_4x4_uv &= mask_uv_int;

    // We don't apply a wide loop filter on the last uv column. If set
    // apply the shorter one instead.
    if (columns == 1) {
      lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16];
      lfm->left_uv[TX_16X16] = 0;
    }
    if (columns == 5) {
      lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc);
      lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc);
    }
  }
  // We don't apply a loop filter on the first column in the image, mask that
  // out.
  if (mi_col == 0) {
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= 0xfefefefefefefefeULL;
      lfm->left_uv[i] &= 0xeeee;
    }
  }

#if CONFIG_LOOPFILTERING_ACROSS_TILES || CONFIG_LOOPFILTERING_ACROSS_TILES_EXT
  if (av1_disable_loopfilter_on_tile_boundary(cm)) {
    update_tile_boundary_filter_mask(cm, mi_row, mi_col, lfm);
  }
#endif  // CONFIG_LOOPFILTERING_ACROSS_TILES || CONFIG_LOOPFILTERING_ACROSS_TILES_EXT

  // Assert if we try to apply 2 different loop filters at the same position.
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8]));
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4]));
  assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
  assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16]));
}

1296 1297 1298
static void filter_selectively_vert(
    uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
    unsigned int mask_4x4, unsigned int mask_4x4_int,
1299
    const loop_filter_info_n *lfi_n, const uint8_t *lfl) {
1300 1301
  unsigned int mask;

1302 1303
  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
       mask >>= 1) {
1304 1305 1306 1307
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
1308
        aom_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
1309
      } else if (mask_8x8 & 1) {
1310
        aom_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
1311
      } else if (mask_4x4 & 1) {
1312
        aom_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
1313 1314 1315
      }
    }
    if (mask_4x4_int & 1)
1316
      aom_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
1317 1318 1319 1320 1321 1322 1323 1324 1325
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}

1326 1327 1328 1329
static void highbd_filter_selectively_vert(
    uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
    unsigned int mask_4x4, unsigned int mask_4x4_int,
    const loop_filter_info_n *lfi_n, const uint8_t *lfl, int bd) {
1330 1331
  unsigned int mask;

1332 1333
  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask;
       mask >>= 1) {
1334 1335 1336 1337
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
1338
        aom_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
1339
                                   bd);
1340
      } else if (mask_8x8 & 1) {
1341
        aom_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
1342
                                  bd);
1343
      } else if (mask_4x4 & 1) {
1344
        aom_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr,
1345
                                  bd);
1346 1347 1348
      }
    }
    if (mask_4x4_int & 1)
1349
      aom_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
1350
                                lfi->hev_thr, bd);
1351 1352 1353 1354 1355 1356 1357 1358 1359
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}

// Per-filter-size bit masks for one row (or column) of blocks; each member
// holds one bit per unit selecting the 16x16, 8x8, or 4x4 loop filter.
typedef struct {
  unsigned int m16x16;  // columns that take the 16-wide filter
  unsigned int m8x8;    // columns that take the 8-wide filter
  unsigned int m4x4;    // columns that take the 4-wide filter
} FilterMasks;

// Get filter level and masks for the given row index 'idx_r'. (Only used for
// the non420 case).
// Note: 'row_masks_ptr' and/or 'col_masks_ptr' can be passed NULL.
static void get_filter_level_and_masks_non420(
1370
    AV1_COMMON *const cm, const struct macroblockd_plane *const plane, int pl,
1371
    MODE_INFO **mib, int mi_row, int mi_col, int idx_r, uint8_t *const lfl_r,
1372 1373
    unsigned int *const mask_4x4_int_r_ptr,
    unsigned int *const mask_4x4_int_c_ptr, FilterMasks *const row_masks_ptr,
1374
    FilterMasks *const col_masks_ptr) {