/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <assert.h>

#include "./av1_rtcd.h"
#include "av1/common/warped_motion.h"
#include "av1/common/scale.h"

#define WARP_ERROR_BLOCK 32

/* clang-format off */
static const int error_measure_lut[512] = {
  // pow 0.7
  16384, 16339, 16294, 16249, 16204, 16158, 16113, 16068,
  16022, 15977, 15932, 15886, 15840, 15795, 15749, 15703,
  15657, 15612, 15566, 15520, 15474, 15427, 15381, 15335,
  15289, 15242, 15196, 15149, 15103, 15056, 15010, 14963,
  14916, 14869, 14822, 14775, 14728, 14681, 14634, 14587,
  14539, 14492, 14445, 14397, 14350, 14302, 14254, 14206,
  14159, 14111, 14063, 14015, 13967, 13918, 13870, 13822,
  13773, 13725, 13676, 13628, 13579, 13530, 13481, 13432,
  13383, 13334, 13285, 13236, 13187, 13137, 13088, 13038,
  12988, 12939, 12889, 12839, 12789, 12739, 12689, 12639,
  12588, 12538, 12487, 12437, 12386, 12335, 12285, 12234,
  12183, 12132, 12080, 12029, 11978, 11926, 11875, 11823,
  11771, 11719, 11667, 11615, 11563, 11511, 11458, 11406,
  11353, 11301, 11248, 11195, 11142, 11089, 11036, 10982,
  10929, 10875, 10822, 10768, 10714, 10660, 10606, 10552,
  10497, 10443, 10388, 10333, 10279, 10224, 10168, 10113,
  10058, 10002,  9947,  9891,  9835,  9779,  9723,  9666,
  9610, 9553, 9497, 9440, 9383, 9326, 9268, 9211,
  9153, 9095, 9037, 8979, 8921, 8862, 8804, 8745,
  8686, 8627, 8568, 8508, 8449, 8389, 8329, 8269,
  8208, 8148, 8087, 8026, 7965, 7903, 7842, 7780,
  7718, 7656, 7593, 7531, 7468, 7405, 7341, 7278,
  7214, 7150, 7086, 7021, 6956, 6891, 6826, 6760,
  6695, 6628, 6562, 6495, 6428, 6361, 6293, 6225,
  6157, 6089, 6020, 5950, 5881, 5811, 5741, 5670,
  5599, 5527, 5456, 5383, 5311, 5237, 5164, 5090,
  5015, 4941, 4865, 4789, 4713, 4636, 4558, 4480,
  4401, 4322, 4242, 4162, 4080, 3998, 3916, 3832,
  3748, 3663, 3577, 3490, 3402, 3314, 3224, 3133,
  3041, 2948, 2854, 2758, 2661, 2562, 2461, 2359,
  2255, 2148, 2040, 1929, 1815, 1698, 1577, 1452,
  1323, 1187, 1045,  894,  731,  550,  339,    0,
  339,  550,  731,  894, 1045, 1187, 1323, 1452,
  1577, 1698, 1815, 1929, 2040, 2148, 2255, 2359,
  2461, 2562, 2661, 2758, 2854, 2948, 3041, 3133,
  3224, 3314, 3402, 3490, 3577, 3663, 3748, 3832,
  3916, 3998, 4080, 4162, 4242, 4322, 4401, 4480,
  4558, 4636, 4713, 4789, 4865, 4941, 5015, 5090,
  5164, 5237, 5311, 5383, 5456, 5527, 5599, 5670,
  5741, 5811, 5881, 5950, 6020, 6089, 6157, 6225,
  6293, 6361, 6428, 6495, 6562, 6628, 6695, 6760,
  6826, 6891, 6956, 7021, 7086, 7150, 7214, 7278,
  7341, 7405, 7468, 7531, 7593, 7656, 7718, 7780,
  7842, 7903, 7965, 8026, 8087, 8148, 8208, 8269,
  8329, 8389, 8449, 8508, 8568, 8627, 8686, 8745,
  8804, 8862, 8921, 8979, 9037, 9095, 9153, 9211,
  9268, 9326, 9383, 9440, 9497, 9553, 9610, 9666,
  9723,  9779,  9835,  9891,  9947, 10002, 10058, 10113,
  10168, 10224, 10279, 10333, 10388, 10443, 10497, 10552,
  10606, 10660, 10714, 10768, 10822, 10875, 10929, 10982,
  11036, 11089, 11142, 11195, 11248, 11301, 11353, 11406,
  11458, 11511, 11563, 11615, 11667, 11719, 11771, 11823,
  11875, 11926, 11978, 12029, 12080, 12132, 12183, 12234,
  12285, 12335, 12386, 12437, 12487, 12538, 12588, 12639,
  12689, 12739, 12789, 12839, 12889, 12939, 12988, 13038,
  13088, 13137, 13187, 13236, 13285, 13334, 13383, 13432,
  13481, 13530, 13579, 13628, 13676, 13725, 13773, 13822,
  13870, 13918, 13967, 14015, 14063, 14111, 14159, 14206,
  14254, 14302, 14350, 14397, 14445, 14492, 14539, 14587,
  14634, 14681, 14728, 14775, 14822, 14869, 14916, 14963,
  15010, 15056, 15103, 15149, 15196, 15242, 15289, 15335,
  15381, 15427, 15474, 15520, 15566, 15612, 15657, 15703,
  15749, 15795, 15840, 15886, 15932, 15977, 16022, 16068,
  16113, 16158, 16204, 16249, 16294, 16339, 16384, 16384,
};
/* clang-format on */
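
// The values above are round(16384 * (|err| / 255)^0.7) for err in
// [-255, 255], indexed by err + 255; the final entry repeats 16384 so that
// highbd_error_measure() below can interpolate one step past err = 255.
// A quick sketch of how the table is read (see error_measure() below):
//   error_measure(0)    == error_measure_lut[255] == 0
//   error_measure(1)    == error_measure_lut[256] == 339
//   error_measure(-255) == error_measure_lut[0]   == 16384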

void project_points_affine(const int32_t *mat, int *points, int *proj,
                           const int n, const int stride_points,
                           const int stride_proj, const int subsampling_x,
                           const int subsampling_y) {
  int i;
  for (i = 0; i < n; ++i) {
    const int x = *(points++), y = *(points++);
    if (subsampling_x)
      *(proj++) = ROUND_POWER_OF_TWO_SIGNED(
          mat[2] * 2 * x + mat[3] * 2 * y + mat[0] +
              (mat[2] + mat[3] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
          WARPEDDIFF_PREC_BITS + 1);
    else
      *(proj++) = ROUND_POWER_OF_TWO_SIGNED(mat[2] * x + mat[3] * y + mat[0],
                                            WARPEDDIFF_PREC_BITS);
    if (subsampling_y)
      *(proj++) = ROUND_POWER_OF_TWO_SIGNED(
          mat[4] * 2 * x + mat[5] * 2 * y + mat[1] +
              (mat[4] + mat[5] - (1 << WARPEDMODEL_PREC_BITS)) / 2,
          WARPEDDIFF_PREC_BITS + 1);
    else
      *(proj++) = ROUND_POWER_OF_TWO_SIGNED(mat[4] * x + mat[5] * y + mat[1],
                                            WARPEDDIFF_PREC_BITS);
    points += stride_points - 2;
    proj += stride_proj - 2;
  }
}
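
// For illustration only (a hypothetical caller, not part of this file):
// projecting two points on a non-subsampled plane could look like
//
//   int points[4] = { x0, y0, x1, y1 };
//   int proj[4];
//   project_points_affine(wm->wmmat, points, proj, /*n=*/2,
//                         /*stride_points=*/2, /*stride_proj=*/2,
//                         /*subsampling_x=*/0, /*subsampling_y=*/0);
//
// Inputs are integer pixel positions; outputs are at WARPEDPIXEL precision,
// i.e. the WARPEDMODEL-precision products are rounded down by
// WARPEDDIFF_PREC_BITS.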

// For warping, we really use a 6-tap filter, but we do blocks of 8 pixels
// at a time. The zoom/rotation/shear in the model are applied to the
// "fractional" position of each pixel, which therefore varies within
// [-1, 2) * WARPEDPIXEL_PREC_SHIFTS.
// We need an extra 2 taps to fit this in, for a total of 8 taps.
/* clang-format off */
const int16_t warped_filter[WARPEDPIXEL_PREC_SHIFTS * 3 + 1][8] = {
#if WARPEDPIXEL_PREC_BITS == 6
  // [-1, 0)
  { 0,   0, 127,   1,   0, 0, 0, 0 }, { 0, - 1, 127,   2,   0, 0, 0, 0 },
  { 1, - 3, 127,   4, - 1, 0, 0, 0 }, { 1, - 4, 126,   6, - 2, 1, 0, 0 },
  { 1, - 5, 126,   8, - 3, 1, 0, 0 }, { 1, - 6, 125,  11, - 4, 1, 0, 0 },
  { 1, - 7, 124,  13, - 4, 1, 0, 0 }, { 2, - 8, 123,  15, - 5, 1, 0, 0 },
  { 2, - 9, 122,  18, - 6, 1, 0, 0 }, { 2, -10, 121,  20, - 6, 1, 0, 0 },
  { 2, -11, 120,  22, - 7, 2, 0, 0 }, { 2, -12, 119,  25, - 8, 2, 0, 0 },
  { 3, -13, 117,  27, - 8, 2, 0, 0 }, { 3, -13, 116,  29, - 9, 2, 0, 0 },
  { 3, -14, 114,  32, -10, 3, 0, 0 }, { 3, -15, 113,  35, -10, 2, 0, 0 },
  { 3, -15, 111,  37, -11, 3, 0, 0 }, { 3, -16, 109,  40, -11, 3, 0, 0 },
  { 3, -16, 108,  42, -12, 3, 0, 0 }, { 4, -17, 106,  45, -13, 3, 0, 0 },
  { 4, -17, 104,  47, -13, 3, 0, 0 }, { 4, -17, 102,  50, -14, 3, 0, 0 },
  { 4, -17, 100,  52, -14, 3, 0, 0 }, { 4, -18,  98,  55, -15, 4, 0, 0 },
  { 4, -18,  96,  58, -15, 3, 0, 0 }, { 4, -18,  94,  60, -16, 4, 0, 0 },
  { 4, -18,  91,  63, -16, 4, 0, 0 }, { 4, -18,  89,  65, -16, 4, 0, 0 },
  { 4, -18,  87,  68, -17, 4, 0, 0 }, { 4, -18,  85,  70, -17, 4, 0, 0 },
  { 4, -18,  82,  73, -17, 4, 0, 0 }, { 4, -18,  80,  75, -17, 4, 0, 0 },
  { 4, -18,  78,  78, -18, 4, 0, 0 }, { 4, -17,  75,  80, -18, 4, 0, 0 },
  { 4, -17,  73,  82, -18, 4, 0, 0 }, { 4, -17,  70,  85, -18, 4, 0, 0 },
  { 4, -17,  68,  87, -18, 4, 0, 0 }, { 4, -16,  65,  89, -18, 4, 0, 0 },
  { 4, -16,  63,  91, -18, 4, 0, 0 }, { 4, -16,  60,  94, -18, 4, 0, 0 },
  { 3, -15,  58,  96, -18, 4, 0, 0 }, { 4, -15,  55,  98, -18, 4, 0, 0 },
  { 3, -14,  52, 100, -17, 4, 0, 0 }, { 3, -14,  50, 102, -17, 4, 0, 0 },
  { 3, -13,  47, 104, -17, 4, 0, 0 }, { 3, -13,  45, 106, -17, 4, 0, 0 },
  { 3, -12,  42, 108, -16, 3, 0, 0 }, { 3, -11,  40, 109, -16, 3, 0, 0 },
  { 3, -11,  37, 111, -15, 3, 0, 0 }, { 2, -10,  35, 113, -15, 3, 0, 0 },
  { 3, -10,  32, 114, -14, 3, 0, 0 }, { 2, - 9,  29, 116, -13, 3, 0, 0 },
  { 2, - 8,  27, 117, -13, 3, 0, 0 }, { 2, - 8,  25, 119, -12, 2, 0, 0 },
  { 2, - 7,  22, 120, -11, 2, 0, 0 }, { 1, - 6,  20, 121, -10, 2, 0, 0 },
  { 1, - 6,  18, 122, - 9, 2, 0, 0 }, { 1, - 5,  15, 123, - 8, 2, 0, 0 },
  { 1, - 4,  13, 124, - 7, 1, 0, 0 }, { 1, - 4,  11, 125, - 6, 1, 0, 0 },
  { 1, - 3,   8, 126, - 5, 1, 0, 0 }, { 1, - 2,   6, 126, - 4, 1, 0, 0 },
  { 0, - 1,   4, 127, - 3, 1, 0, 0 }, { 0,   0,   2, 127, - 1, 0, 0, 0 },

  // [0, 1)
  { 0,  0,   0, 127,   1,   0,  0,  0}, { 0,  0,  -1, 127,   2,   0,  0,  0},
  { 0,  1,  -3, 127,   4,  -2,  1,  0}, { 0,  1,  -5, 127,   6,  -2,  1,  0},
  { 0,  2,  -6, 126,   8,  -3,  1,  0}, {-1,  2,  -7, 126,  11,  -4,  2, -1},
  {-1,  3,  -8, 125,  13,  -5,  2, -1}, {-1,  3, -10, 124,  16,  -6,  3, -1},
  {-1,  4, -11, 123,  18,  -7,  3, -1}, {-1,  4, -12, 122,  20,  -7,  3, -1},
  {-1,  4, -13, 121,  23,  -8,  3, -1}, {-2,  5, -14, 120,  25,  -9,  4, -1},
  {-1,  5, -15, 119,  27, -10,  4, -1}, {-1,  5, -16, 118,  30, -11,  4, -1},
  {-2,  6, -17, 116,  33, -12,  5, -1}, {-2,  6, -17, 114,  35, -12,  5, -1},
  {-2,  6, -18, 113,  38, -13,  5, -1}, {-2,  7, -19, 111,  41, -14,  6, -2},
  {-2,  7, -19, 110,  43, -15,  6, -2}, {-2,  7, -20, 108,  46, -15,  6, -2},
  {-2,  7, -20, 106,  49, -16,  6, -2}, {-2,  7, -21, 104,  51, -16,  7, -2},
  {-2,  7, -21, 102,  54, -17,  7, -2}, {-2,  8, -21, 100,  56, -18,  7, -2},
  {-2,  8, -22,  98,  59, -18,  7, -2}, {-2,  8, -22,  96,  62, -19,  7, -2},
  {-2,  8, -22,  94,  64, -19,  7, -2}, {-2,  8, -22,  91,  67, -20,  8, -2},
  {-2,  8, -22,  89,  69, -20,  8, -2}, {-2,  8, -22,  87,  72, -21,  8, -2},
  {-2,  8, -21,  84,  74, -21,  8, -2}, {-2,  8, -22,  82,  77, -21,  8, -2},
  {-2,  8, -21,  79,  79, -21,  8, -2}, {-2,  8, -21,  77,  82, -22,  8, -2},
  {-2,  8, -21,  74,  84, -21,  8, -2}, {-2,  8, -21,  72,  87, -22,  8, -2},
  {-2,  8, -20,  69,  89, -22,  8, -2}, {-2,  8, -20,  67,  91, -22,  8, -2},
  {-2,  7, -19,  64,  94, -22,  8, -2}, {-2,  7, -19,  62,  96, -22,  8, -2},
  {-2,  7, -18,  59,  98, -22,  8, -2}, {-2,  7, -18,  56, 100, -21,  8, -2},
  {-2,  7, -17,  54, 102, -21,  7, -2}, {-2,  7, -16,  51, 104, -21,  7, -2},
  {-2,  6, -16,  49, 106, -20,  7, -2}, {-2,  6, -15,  46, 108, -20,  7, -2},
  {-2,  6, -15,  43, 110, -19,  7, -2}, {-2,  6, -14,  41, 111, -19,  7, -2},
  {-1,  5, -13,  38, 113, -18,  6, -2}, {-1,  5, -12,  35, 114, -17,  6, -2},
  {-1,  5, -12,  33, 116, -17,  6, -2}, {-1,  4, -11,  30, 118, -16,  5, -1},
  {-1,  4, -10,  27, 119, -15,  5, -1}, {-1,  4,  -9,  25, 120, -14,  5, -2},
  {-1,  3,  -8,  23, 121, -13,  4, -1}, {-1,  3,  -7,  20, 122, -12,  4, -1},
  {-1,  3,  -7,  18, 123, -11,  4, -1}, {-1,  3,  -6,  16, 124, -10,  3, -1},
  {-1,  2,  -5,  13, 125,  -8,  3, -1}, {-1,  2,  -4,  11, 126,  -7,  2, -1},
  { 0,  1,  -3,   8, 126,  -6,  2,  0}, { 0,  1,  -2,   6, 127,  -5,  1,  0},
  { 0,  1,  -2,   4, 127,  -3,  1,  0}, { 0,  0,   0,   2, 127,  -1,  0,  0},

  // [1, 2)
  { 0, 0, 0,   1, 127,   0,   0, 0 }, { 0, 0, 0, - 1, 127,   2,   0, 0 },
  { 0, 0, 1, - 3, 127,   4, - 1, 0 }, { 0, 0, 1, - 4, 126,   6, - 2, 1 },
  { 0, 0, 1, - 5, 126,   8, - 3, 1 }, { 0, 0, 1, - 6, 125,  11, - 4, 1 },
  { 0, 0, 1, - 7, 124,  13, - 4, 1 }, { 0, 0, 2, - 8, 123,  15, - 5, 1 },
  { 0, 0, 2, - 9, 122,  18, - 6, 1 }, { 0, 0, 2, -10, 121,  20, - 6, 1 },
  { 0, 0, 2, -11, 120,  22, - 7, 2 }, { 0, 0, 2, -12, 119,  25, - 8, 2 },
  { 0, 0, 3, -13, 117,  27, - 8, 2 }, { 0, 0, 3, -13, 116,  29, - 9, 2 },
  { 0, 0, 3, -14, 114,  32, -10, 3 }, { 0, 0, 3, -15, 113,  35, -10, 2 },
  { 0, 0, 3, -15, 111,  37, -11, 3 }, { 0, 0, 3, -16, 109,  40, -11, 3 },
  { 0, 0, 3, -16, 108,  42, -12, 3 }, { 0, 0, 4, -17, 106,  45, -13, 3 },
  { 0, 0, 4, -17, 104,  47, -13, 3 }, { 0, 0, 4, -17, 102,  50, -14, 3 },
  { 0, 0, 4, -17, 100,  52, -14, 3 }, { 0, 0, 4, -18,  98,  55, -15, 4 },
  { 0, 0, 4, -18,  96,  58, -15, 3 }, { 0, 0, 4, -18,  94,  60, -16, 4 },
  { 0, 0, 4, -18,  91,  63, -16, 4 }, { 0, 0, 4, -18,  89,  65, -16, 4 },
  { 0, 0, 4, -18,  87,  68, -17, 4 }, { 0, 0, 4, -18,  85,  70, -17, 4 },
  { 0, 0, 4, -18,  82,  73, -17, 4 }, { 0, 0, 4, -18,  80,  75, -17, 4 },
  { 0, 0, 4, -18,  78,  78, -18, 4 }, { 0, 0, 4, -17,  75,  80, -18, 4 },
  { 0, 0, 4, -17,  73,  82, -18, 4 }, { 0, 0, 4, -17,  70,  85, -18, 4 },
  { 0, 0, 4, -17,  68,  87, -18, 4 }, { 0, 0, 4, -16,  65,  89, -18, 4 },
  { 0, 0, 4, -16,  63,  91, -18, 4 }, { 0, 0, 4, -16,  60,  94, -18, 4 },
  { 0, 0, 3, -15,  58,  96, -18, 4 }, { 0, 0, 4, -15,  55,  98, -18, 4 },
  { 0, 0, 3, -14,  52, 100, -17, 4 }, { 0, 0, 3, -14,  50, 102, -17, 4 },
  { 0, 0, 3, -13,  47, 104, -17, 4 }, { 0, 0, 3, -13,  45, 106, -17, 4 },
  { 0, 0, 3, -12,  42, 108, -16, 3 }, { 0, 0, 3, -11,  40, 109, -16, 3 },
  { 0, 0, 3, -11,  37, 111, -15, 3 }, { 0, 0, 2, -10,  35, 113, -15, 3 },
  { 0, 0, 3, -10,  32, 114, -14, 3 }, { 0, 0, 2, - 9,  29, 116, -13, 3 },
  { 0, 0, 2, - 8,  27, 117, -13, 3 }, { 0, 0, 2, - 8,  25, 119, -12, 2 },
  { 0, 0, 2, - 7,  22, 120, -11, 2 }, { 0, 0, 1, - 6,  20, 121, -10, 2 },
  { 0, 0, 1, - 6,  18, 122, - 9, 2 }, { 0, 0, 1, - 5,  15, 123, - 8, 2 },
  { 0, 0, 1, - 4,  13, 124, - 7, 1 }, { 0, 0, 1, - 4,  11, 125, - 6, 1 },
  { 0, 0, 1, - 3,   8, 126, - 5, 1 }, { 0, 0, 1, - 2,   6, 126, - 4, 1 },
  { 0, 0, 0, - 1,   4, 127, - 3, 1 }, { 0, 0, 0,   0,   2, 127, - 1, 0 },
  // dummy (replicate row index 191)
  { 0, 0, 0,   0,   2, 127, - 1, 0 },

#elif WARPEDPIXEL_PREC_BITS == 5
  // [-1, 0)
  {0,   0, 127,   1,   0, 0, 0, 0}, {1,  -3, 127,   4,  -1, 0, 0, 0},
  {1,  -5, 126,   8,  -3, 1, 0, 0}, {1,  -7, 124,  13,  -4, 1, 0, 0},
  {2,  -9, 122,  18,  -6, 1, 0, 0}, {2, -11, 120,  22,  -7, 2, 0, 0},
  {3, -13, 117,  27,  -8, 2, 0, 0}, {3, -14, 114,  32, -10, 3, 0, 0},
  {3, -15, 111,  37, -11, 3, 0, 0}, {3, -16, 108,  42, -12, 3, 0, 0},
  {4, -17, 104,  47, -13, 3, 0, 0}, {4, -17, 100,  52, -14, 3, 0, 0},
  {4, -18,  96,  58, -15, 3, 0, 0}, {4, -18,  91,  63, -16, 4, 0, 0},
  {4, -18,  87,  68, -17, 4, 0, 0}, {4, -18,  82,  73, -17, 4, 0, 0},
  {4, -18,  78,  78, -18, 4, 0, 0}, {4, -17,  73,  82, -18, 4, 0, 0},
  {4, -17,  68,  87, -18, 4, 0, 0}, {4, -16,  63,  91, -18, 4, 0, 0},
  {3, -15,  58,  96, -18, 4, 0, 0}, {3, -14,  52, 100, -17, 4, 0, 0},
  {3, -13,  47, 104, -17, 4, 0, 0}, {3, -12,  42, 108, -16, 3, 0, 0},
  {3, -11,  37, 111, -15, 3, 0, 0}, {3, -10,  32, 114, -14, 3, 0, 0},
  {2,  -8,  27, 117, -13, 3, 0, 0}, {2,  -7,  22, 120, -11, 2, 0, 0},
  {1,  -6,  18, 122,  -9, 2, 0, 0}, {1,  -4,  13, 124,  -7, 1, 0, 0},
  {1,  -3,   8, 126,  -5, 1, 0, 0}, {0,  -1,   4, 127,  -3, 1, 0, 0},
  // [0, 1)
  { 0,  0,   0, 127,   1,   0,   0,  0}, { 0,  1,  -3, 127,   4,  -2,   1,  0},
  { 0,  2,  -6, 126,   8,  -3,   1,  0}, {-1,  3,  -8, 125,  13,  -5,   2, -1},
  {-1,  4, -11, 123,  18,  -7,   3, -1}, {-1,  4, -13, 121,  23,  -8,   3, -1},
  {-1,  5, -15, 119,  27, -10,   4, -1}, {-2,  6, -17, 116,  33, -12,   5, -1},
  {-2,  6, -18, 113,  38, -13,   5, -1}, {-2,  7, -19, 110,  43, -15,   6, -2},
  {-2,  7, -20, 106,  49, -16,   6, -2}, {-2,  7, -21, 102,  54, -17,   7, -2},
  {-2,  8, -22,  98,  59, -18,   7, -2}, {-2,  8, -22,  94,  64, -19,   7, -2},
  {-2,  8, -22,  89,  69, -20,   8, -2}, {-2,  8, -21,  84,  74, -21,   8, -2},
  {-2,  8, -21,  79,  79, -21,   8, -2}, {-2,  8, -21,  74,  84, -21,   8, -2},
  {-2,  8, -20,  69,  89, -22,   8, -2}, {-2,  7, -19,  64,  94, -22,   8, -2},
  {-2,  7, -18,  59,  98, -22,   8, -2}, {-2,  7, -17,  54, 102, -21,   7, -2},
  {-2,  6, -16,  49, 106, -20,   7, -2}, {-2,  6, -15,  43, 110, -19,   7, -2},
  {-1,  5, -13,  38, 113, -18,   6, -2}, {-1,  5, -12,  33, 116, -17,   6, -2},
  {-1,  4, -10,  27, 119, -15,   5, -1}, {-1,  3,  -8,  23, 121, -13,   4, -1},
  {-1,  3,  -7,  18, 123, -11,   4, -1}, {-1,  2,  -5,  13, 125,  -8,   3, -1},
  { 0,  1,  -3,   8, 126,  -6,   2,  0}, { 0,  1,  -2,   4, 127,  -3,   1,  0},
  // [1, 2)
  {0, 0, 0,   1, 127,   0,   0, 0}, {0, 0, 1,  -3, 127,   4,  -1, 0},
  {0, 0, 1,  -5, 126,   8,  -3, 1}, {0, 0, 1,  -7, 124,  13,  -4, 1},
  {0, 0, 2,  -9, 122,  18,  -6, 1}, {0, 0, 2, -11, 120,  22,  -7, 2},
  {0, 0, 3, -13, 117,  27,  -8, 2}, {0, 0, 3, -14, 114,  32, -10, 3},
  {0, 0, 3, -15, 111,  37, -11, 3}, {0, 0, 3, -16, 108,  42, -12, 3},
  {0, 0, 4, -17, 104,  47, -13, 3}, {0, 0, 4, -17, 100,  52, -14, 3},
  {0, 0, 4, -18,  96,  58, -15, 3}, {0, 0, 4, -18,  91,  63, -16, 4},
  {0, 0, 4, -18,  87,  68, -17, 4}, {0, 0, 4, -18,  82,  73, -17, 4},
  {0, 0, 4, -18,  78,  78, -18, 4}, {0, 0, 4, -17,  73,  82, -18, 4},
  {0, 0, 4, -17,  68,  87, -18, 4}, {0, 0, 4, -16,  63,  91, -18, 4},
  {0, 0, 3, -15,  58,  96, -18, 4}, {0, 0, 3, -14,  52, 100, -17, 4},
  {0, 0, 3, -13,  47, 104, -17, 4}, {0, 0, 3, -12,  42, 108, -16, 3},
  {0, 0, 3, -11,  37, 111, -15, 3}, {0, 0, 3, -10,  32, 114, -14, 3},
  {0, 0, 2,  -8,  27, 117, -13, 3}, {0, 0, 2,  -7,  22, 120, -11, 2},
  {0, 0, 1,  -6,  18, 122,  -9, 2}, {0, 0, 1,  -4,  13, 124,  -7, 1},
  {0, 0, 1,  -3,   8, 126,  -5, 1}, {0, 0, 0,  -1,   4, 127,  -3, 1},
  // dummy (replicate row index 95)
  {0, 0, 0,  -1,   4, 127,  -3, 1},

#endif  // WARPEDPIXEL_PREC_BITS == 6
};

/* clang-format on */
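
// A sketch of how this table is indexed (mirroring the lookups in
// av1_warp_affine_c below): a fractional offset sx at WARPEDMODEL precision,
// which may range over [-1, 2) pixels, is rounded to WARPEDPIXEL precision
// and then shifted by WARPEDPIXEL_PREC_SHIFTS so that the [-1, 0) section
// starts at row 0:
//
//   const int offs = ROUND_POWER_OF_TWO(sx, WARPEDDIFF_PREC_BITS) +
//                    WARPEDPIXEL_PREC_SHIFTS;
//   const int16_t *coeffs = warped_filter[offs];
//
// Each row holds 8 taps summing to 128, i.e. 1 << WARPEDPIXEL_FILTER_BITS.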

#define DIV_LUT_PREC_BITS 14
#define DIV_LUT_BITS 8
#define DIV_LUT_NUM (1 << DIV_LUT_BITS)

static const uint16_t div_lut[DIV_LUT_NUM + 1] = {
  16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
  15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
  15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
  14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
  13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
  13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
  13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
  12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
  12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
  11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
  11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
  11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
  10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
  10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
  10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
  9963,  9939,  9916,  9892,  9869,  9846,  9823,  9800,  9777,  9754,  9732,
  9709,  9687,  9664,  9642,  9620,  9598,  9576,  9554,  9533,  9511,  9489,
  9468,  9447,  9425,  9404,  9383,  9362,  9341,  9321,  9300,  9279,  9259,
  9239,  9218,  9198,  9178,  9158,  9138,  9118,  9098,  9079,  9059,  9039,
  9020,  9001,  8981,  8962,  8943,  8924,  8905,  8886,  8867,  8849,  8830,
  8812,  8793,  8775,  8756,  8738,  8720,  8702,  8684,  8666,  8648,  8630,
  8613,  8595,  8577,  8560,  8542,  8525,  8508,  8490,  8473,  8456,  8439,
  8422,  8405,  8389,  8372,  8355,  8339,  8322,  8306,  8289,  8273,  8257,
  8240,  8224,  8208,  8192,
};

// Decomposes a divisor D such that 1/D = y/2^shift, where y is returned
// at precision of DIV_LUT_PREC_BITS along with the shift.
static int16_t resolve_divisor_64(uint64_t D, int16_t *shift) {
  int64_t e, f;
  *shift = (int16_t)((D >> 32) ? get_msb((unsigned int)(D >> 32)) + 32
                               : get_msb((unsigned int)D));
  // e is obtained from D after resetting the most significant 1 bit.
  e = D - ((uint64_t)1 << *shift);
  // Get the most significant DIV_LUT_BITS (8) bits of e into f
  if (*shift > DIV_LUT_BITS)
    f = ROUND_POWER_OF_TWO_64(e, *shift - DIV_LUT_BITS);
  else
    f = e << (DIV_LUT_BITS - *shift);
  assert(f <= DIV_LUT_NUM);
  *shift += DIV_LUT_PREC_BITS;
  // Use f as lookup into the precomputed table of multipliers
  return div_lut[f];
}

static int16_t resolve_divisor_32(uint32_t D, int16_t *shift) {
  int32_t e, f;
  *shift = get_msb(D);
  // e is obtained from D after resetting the most significant 1 bit.
  e = D - ((uint32_t)1 << *shift);
  // Get the most significant DIV_LUT_BITS (8) bits of e into f
  if (*shift > DIV_LUT_BITS)
    f = ROUND_POWER_OF_TWO(e, *shift - DIV_LUT_BITS);
  else
    f = e << (DIV_LUT_BITS - *shift);
  assert(f <= DIV_LUT_NUM);
  *shift += DIV_LUT_PREC_BITS;
  // Use f as lookup into the precomputed table of multipliers
  return div_lut[f];
}
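
// Worked example (a sketch using the constants above): for D = 3 the most
// significant bit is at position 1, so e = 3 - 2 = 1 and, as
// *shift <= DIV_LUT_BITS, f = 1 << (DIV_LUT_BITS - 1) = 128. The lookup
// returns div_lut[128] = 10923 with *shift = 1 + DIV_LUT_PREC_BITS = 15,
// and indeed 10923 / 2^15 = 0.33334 ~= 1/3.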

static int is_affine_valid(const WarpedMotionParams *const wm) {
  const int32_t *mat = wm->wmmat;
  return (mat[2] > 0);
}

static int is_affine_shear_allowed(int16_t alpha, int16_t beta, int16_t gamma,
                                   int16_t delta) {
  if ((4 * abs(alpha) + 7 * abs(beta) >= (1 << WARPEDMODEL_PREC_BITS)) ||
      (4 * abs(gamma) + 4 * abs(delta) >= (1 << WARPEDMODEL_PREC_BITS)))
    return 0;
  else
    return 1;
}

// Returns 1 on success or 0 on an invalid affine set
int get_shear_params(WarpedMotionParams *wm) {
  const int32_t *mat = wm->wmmat;
  if (!is_affine_valid(wm)) return 0;
  wm->alpha =
      clamp(mat[2] - (1 << WARPEDMODEL_PREC_BITS), INT16_MIN, INT16_MAX);
  wm->beta = clamp(mat[3], INT16_MIN, INT16_MAX);
  int16_t shift;
  int16_t y = resolve_divisor_32(abs(mat[2]), &shift) * (mat[2] < 0 ? -1 : 1);
  int64_t v;
  v = ((int64_t)mat[4] * (1 << WARPEDMODEL_PREC_BITS)) * y;
  wm->gamma =
      clamp((int)ROUND_POWER_OF_TWO_SIGNED_64(v, shift), INT16_MIN, INT16_MAX);
  v = ((int64_t)mat[3] * mat[4]) * y;
  wm->delta = clamp(mat[5] - (int)ROUND_POWER_OF_TWO_SIGNED_64(v, shift) -
                        (1 << WARPEDMODEL_PREC_BITS),
                    INT16_MIN, INT16_MAX);
  if (!is_affine_shear_allowed(wm->alpha, wm->beta, wm->gamma, wm->delta))
    return 0;

  wm->alpha = ROUND_POWER_OF_TWO_SIGNED(wm->alpha, WARP_PARAM_REDUCE_BITS) *
              (1 << WARP_PARAM_REDUCE_BITS);
  wm->beta = ROUND_POWER_OF_TWO_SIGNED(wm->beta, WARP_PARAM_REDUCE_BITS) *
             (1 << WARP_PARAM_REDUCE_BITS);
  wm->gamma = ROUND_POWER_OF_TWO_SIGNED(wm->gamma, WARP_PARAM_REDUCE_BITS) *
              (1 << WARP_PARAM_REDUCE_BITS);
  wm->delta = ROUND_POWER_OF_TWO_SIGNED(wm->delta, WARP_PARAM_REDUCE_BITS) *
              (1 << WARP_PARAM_REDUCE_BITS);
  return 1;
}
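
// In floating-point terms (a sketch; all of the above is fixed point with
// 1.0 == 1 << WARPEDMODEL_PREC_BITS, and the results are snapped to
// multiples of 1 << WARP_PARAM_REDUCE_BITS), get_shear_params() computes
//
//   alpha = a - 1        beta  = b
//   gamma = c / a        delta = d - b * c / a - 1
//
// for the affine matrix [a b; c d] = wmmat[2..5]; see the factorization
// notes above av1_warp_affine_c below.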

#if CONFIG_HIGHBITDEPTH
static INLINE int highbd_error_measure(int err, int bd) {
  const int b = bd - 8;
  const int bmask = (1 << b) - 1;
  const int v = (1 << b);
  int e1, e2;
  err = abs(err);
  e1 = err >> b;
  e2 = err & bmask;
  return error_measure_lut[255 + e1] * (v - e2) +
         error_measure_lut[256 + e1] * e2;
}
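
// For example (a sketch at bd = 10, so b = 2, v = 4): an error of 5 splits
// into e1 = 1, e2 = 1, and the result is the linear interpolation
//   3 * error_measure_lut[256] + 1 * error_measure_lut[257],
// i.e. the 8-bit LUT is stretched over the wider range by interpolating
// between adjacent entries.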

/* Note: For an explanation of the warp algorithm, and some notes on bit widths
    for hardware implementations, see the comments above av1_warp_affine_c
*/
void av1_highbd_warp_affine_c(const int32_t *mat, const uint16_t *ref,
                              int width, int height, int stride, uint16_t *pred,
                              int p_col, int p_row, int p_width, int p_height,
                              int p_stride, int subsampling_x,
                              int subsampling_y, int bd,
                              ConvolveParams *conv_params, int16_t alpha,
                              int16_t beta, int16_t gamma, int16_t delta) {
  int32_t tmp[15 * 8];
  int i, j, k, l, m;
#if CONFIG_CONVOLVE_ROUND
  const int use_conv_params = conv_params->round == CONVOLVE_OPT_NO_ROUND;
  const int reduce_bits_horiz =
      use_conv_params ? conv_params->round_0 : HORSHEAR_REDUCE_PREC_BITS;
  const int max_bits_horiz =
      use_conv_params
          ? bd + FILTER_BITS + 1 - conv_params->round_0
          : bd + WARPEDPIXEL_FILTER_BITS + 1 - HORSHEAR_REDUCE_PREC_BITS;
  const int offset_bits_horiz =
      use_conv_params ? bd + FILTER_BITS - 1 : bd + WARPEDPIXEL_FILTER_BITS - 1;
  const int offset_bits_vert =
      use_conv_params
          ? bd + 2 * FILTER_BITS - conv_params->round_0
          : bd + 2 * WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS;
  if (use_conv_params) {
    conv_params->do_post_rounding = 1;
  }
  assert(FILTER_BITS == WARPEDPIXEL_FILTER_BITS);
#else
  const int reduce_bits_horiz = HORSHEAR_REDUCE_PREC_BITS;
  const int max_bits_horiz =
      bd + WARPEDPIXEL_FILTER_BITS + 1 - HORSHEAR_REDUCE_PREC_BITS;
  const int offset_bits_horiz = bd + WARPEDPIXEL_FILTER_BITS - 1;
  const int offset_bits_vert =
      bd + 2 * WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS;
#endif
  (void)max_bits_horiz;

  for (i = p_row; i < p_row + p_height; i += 8) {
    for (j = p_col; j < p_col + p_width; j += 8) {
      // Calculate the center of this 8x8 block,
      // project to luma coordinates (if in a subsampled chroma plane),
      // apply the affine transformation,
      // then convert back to the original coordinates (if necessary)
      const int32_t src_x = (j + 4) << subsampling_x;
      const int32_t src_y = (i + 4) << subsampling_y;
      const int32_t dst_x = mat[2] * src_x + mat[3] * src_y + mat[0];
      const int32_t dst_y = mat[4] * src_x + mat[5] * src_y + mat[1];
      const int32_t x4 = dst_x >> subsampling_x;
      const int32_t y4 = dst_y >> subsampling_y;

      int32_t ix4 = x4 >> WARPEDMODEL_PREC_BITS;
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
      int32_t iy4 = y4 >> WARPEDMODEL_PREC_BITS;
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);

      sx4 += alpha * (-4) + beta * (-4);
      sy4 += gamma * (-4) + delta * (-4);

      sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
      sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);

      // Horizontal filter
      for (k = -7; k < 8; ++k) {
        int iy = iy4 + k;
        if (iy < 0)
          iy = 0;
        else if (iy > height - 1)
          iy = height - 1;

        int sx = sx4 + beta * (k + 4);
        for (l = -4; l < 4; ++l) {
          int ix = ix4 + l - 3;
          const int offs = ROUND_POWER_OF_TWO(sx, WARPEDDIFF_PREC_BITS) +
                           WARPEDPIXEL_PREC_SHIFTS;
          assert(offs >= 0 && offs <= WARPEDPIXEL_PREC_SHIFTS * 3);
          const int16_t *coeffs = warped_filter[offs];

          int32_t sum = 1 << offset_bits_horiz;
          for (m = 0; m < 8; ++m) {
            int sample_x = ix + m;
            if (sample_x < 0)
              sample_x = 0;
            else if (sample_x > width - 1)
              sample_x = width - 1;
            sum += ref[iy * stride + sample_x] * coeffs[m];
          }
          sum = ROUND_POWER_OF_TWO(sum, reduce_bits_horiz);
          assert(0 <= sum && sum < (1 << max_bits_horiz));
          tmp[(k + 7) * 8 + (l + 4)] = sum;
          sx += alpha;
        }
      }

      // Vertical filter
      for (k = -4; k < AOMMIN(4, p_row + p_height - i - 4); ++k) {
        int sy = sy4 + delta * (k + 4);
        for (l = -4; l < AOMMIN(4, p_col + p_width - j - 4); ++l) {
          const int offs = ROUND_POWER_OF_TWO(sy, WARPEDDIFF_PREC_BITS) +
                           WARPEDPIXEL_PREC_SHIFTS;
          assert(offs >= 0 && offs <= WARPEDPIXEL_PREC_SHIFTS * 3);
          const int16_t *coeffs = warped_filter[offs];

          int32_t sum = 1 << offset_bits_vert;
          for (m = 0; m < 8; ++m) {
            sum += tmp[(k + m + 4) * 8 + (l + 4)] * coeffs[m];
          }
#if CONFIG_CONVOLVE_ROUND
          if (use_conv_params) {
            CONV_BUF_TYPE *p =
                &conv_params
                     ->dst[(i - p_row + k + 4) * conv_params->dst_stride +
                           (j - p_col + l + 4)];
            sum = ROUND_POWER_OF_TWO(sum, conv_params->round_1) -
                  (1 << (offset_bits_horiz + FILTER_BITS -
                         conv_params->round_0 - conv_params->round_1)) -
                  (1 << (offset_bits_vert - conv_params->round_1));
#if CONFIG_JNT_COMP
            if (conv_params->use_jnt_comp_avg) {
              if (conv_params->do_average) {
                *p += sum * conv_params->bck_offset;
                *p = ROUND_POWER_OF_TWO(*p, DIST_PRECISION_BITS - 1);
              } else {
                *p = sum * conv_params->fwd_offset;
              }
            } else {
              if (conv_params->do_average)
                *p += sum;
              else
                *p = sum;
            }
#else
            if (conv_params->do_average)
              *p += sum;
            else
              *p = sum;
#endif  // CONFIG_JNT_COMP
          } else {
#else
          {
#endif
            uint16_t *p =
                &pred[(i - p_row + k + 4) * p_stride + (j - p_col + l + 4)];
            sum = ROUND_POWER_OF_TWO(sum, VERSHEAR_REDUCE_PREC_BITS);
            assert(0 <= sum && sum < (1 << (bd + 2)));
            uint16_t px =
                clip_pixel_highbd(sum - (1 << (bd - 1)) - (1 << bd), bd);
            if (conv_params->do_average)
              *p = ROUND_POWER_OF_TWO(*p + px, 1);
            else
              *p = px;
          }
          sy += gamma;
        }
      }
    }
  }
}

static void highbd_warp_plane(WarpedMotionParams *wm, const uint8_t *const ref8,
                              int width, int height, int stride,
                              const uint8_t *const pred8, int p_col, int p_row,
                              int p_width, int p_height, int p_stride,
                              int subsampling_x, int subsampling_y, int bd,
                              ConvolveParams *conv_params) {
  assert(wm->wmtype <= AFFINE);
  if (wm->wmtype == ROTZOOM) {
    wm->wmmat[5] = wm->wmmat[2];
    wm->wmmat[4] = -wm->wmmat[3];
  }
  const int32_t *const mat = wm->wmmat;
  const int16_t alpha = wm->alpha;
  const int16_t beta = wm->beta;
  const int16_t gamma = wm->gamma;
  const int16_t delta = wm->delta;

  const uint16_t *const ref = CONVERT_TO_SHORTPTR(ref8);
  uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
  av1_highbd_warp_affine(mat, ref, width, height, stride, pred, p_col, p_row,
                         p_width, p_height, p_stride, subsampling_x,
                         subsampling_y, bd, conv_params, alpha, beta, gamma,
                         delta);
}

static int64_t highbd_frame_error(const uint16_t *const ref, int stride,
                                  const uint16_t *const dst, int p_width,
                                  int p_height, int p_stride, int bd) {
  int64_t sum_error = 0;
  for (int i = 0; i < p_height; ++i) {
    for (int j = 0; j < p_width; ++j) {
      sum_error +=
          highbd_error_measure(dst[j + i * p_stride] - ref[j + i * stride], bd);
    }
  }
  return sum_error;
}

static int64_t highbd_warp_error(
    WarpedMotionParams *wm, const uint8_t *const ref8, int width, int height,
    int stride, const uint8_t *const dst8, int p_col, int p_row, int p_width,
    int p_height, int p_stride, int subsampling_x, int subsampling_y, int bd,
    int64_t best_error) {
  int64_t gm_sumerr = 0;
  int warp_w, warp_h;
  int error_bsize_w = AOMMIN(p_width, WARP_ERROR_BLOCK);
  int error_bsize_h = AOMMIN(p_height, WARP_ERROR_BLOCK);
  uint16_t tmp[WARP_ERROR_BLOCK * WARP_ERROR_BLOCK];

  ConvolveParams conv_params = get_conv_params(0, 0, 0);
#if CONFIG_JNT_COMP
  conv_params.use_jnt_comp_avg = 0;
#endif
  for (int i = p_row; i < p_row + p_height; i += WARP_ERROR_BLOCK) {
    for (int j = p_col; j < p_col + p_width; j += WARP_ERROR_BLOCK) {
      // avoid warping extra 8x8 blocks in the padded region of the frame
      // when p_width and p_height are not multiples of WARP_ERROR_BLOCK
      warp_w = AOMMIN(error_bsize_w, p_col + p_width - j);
      warp_h = AOMMIN(error_bsize_h, p_row + p_height - i);
      highbd_warp_plane(wm, ref8, width, height, stride,
                        CONVERT_TO_BYTEPTR(tmp), j, i, warp_w, warp_h,
                        WARP_ERROR_BLOCK, subsampling_x, subsampling_y, bd,
                        &conv_params);

      gm_sumerr += highbd_frame_error(
          tmp, WARP_ERROR_BLOCK, CONVERT_TO_SHORTPTR(dst8) + j + i * p_stride,
          warp_w, warp_h, p_stride, bd);
      if (gm_sumerr > best_error) return gm_sumerr;
    }
  }
  return gm_sumerr;
}
#endif  // CONFIG_HIGHBITDEPTH

static INLINE int error_measure(int err) {
  return error_measure_lut[255 + err];
}

/* The warp filter for ROTZOOM and AFFINE models works as follows:
   * Split the input into 8x8 blocks
   * For each block, project the point (4, 4) within the block, to get the
     overall block position. Split into integer and fractional coordinates,
     maintaining full WARPEDMODEL precision
   * Filter horizontally: Generate 15 rows of 8 pixels each. Each pixel gets a
     variable horizontal offset. This means that, while the rows of the
     intermediate buffer align with the rows of the *reference* image, the
     columns align with the columns of the *destination* image.
   * Filter vertically: Generate the output block (up to 8x8 pixels, but if the
     destination is too small we crop the output at this stage). Each pixel has
     a variable vertical offset, so that the resulting rows are aligned with
     the rows of the destination image.

   To accomplish these alignments, we factor the warp matrix as a
   product of two shear / asymmetric zoom matrices:
   / a b \  = /   1       0    \ * / 1+alpha  beta \
   \ c d /    \ gamma  1+delta /   \    0      1   /
   where a, b, c, d are wmmat[2], wmmat[3], wmmat[4], wmmat[5] respectively.
   The horizontal shear (with alpha and beta) is applied first,
   then the vertical shear (with gamma and delta) is applied second.

   The only limitation is that, to fit this in a fixed 8-tap filter size,
   the fractional pixel offsets must be at most +-1. Since the horizontal filter
   generates 15 rows of 8 columns, and the initial point we project is at (4, 4)
   within the block, the parameters must satisfy
   4 * |alpha| + 7 * |beta| <= 1   and   4 * |gamma| + 4 * |delta| <= 1
   for this filter to be applicable.

   Note: This function assumes that the caller has done all of the relevant
   checks, i.e. that we have a ROTZOOM or AFFINE model, that wm[4] and wm[5]
   are set appropriately (if using a ROTZOOM model), and that alpha, beta,
   gamma, delta are all in range.

   TODO(david.barker): Maybe support scaled references?
*/
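
/* Multiplying the factorization out (a sketch connecting it to
   get_shear_params() above):
   /   1       0    \ * / 1+alpha  beta \  =  /     1+alpha            beta        \
   \ gamma  1+delta /   \    0      1   /     \ gamma*(1+alpha)  gamma*beta+1+delta /
   Matching entries against [a b; c d] gives alpha = a - 1, beta = b,
   gamma = c / a and delta = d - b*c/a - 1, which is exactly what
   get_shear_params() computes in fixed point.
*/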
/* A note on hardware implementation:
    The warp filter is intended to be implementable using the same hardware as
    the high-precision convolve filters from the loop-restoration and
    convolve-round experiments.

    For a single filter stage, considering all of the coefficient sets for the
    warp filter and the regular convolution filter, an input in the range
    [0, 2^k - 1] is mapped into the range [-56 * (2^k - 1), 184 * (2^k - 1)]
    before rounding.

    Allowing for some changes to the filter coefficient sets, call the range
    [-64 * 2^k, 192 * 2^k]. Then, if we initialize the accumulator to 64 * 2^k,
    we can replace this by the range [0, 256 * 2^k], which can be stored in an
    unsigned value with 8 + k bits.

    This allows the derivation of the appropriate bit widths and offsets for
    the various intermediate values: If

    F := WARPEDPIXEL_FILTER_BITS = 7 (or else the above ranges need adjusting)
         So a *single* filter stage maps a k-bit input to a (k + F + 1)-bit
         intermediate value.
    H := HORSHEAR_REDUCE_PREC_BITS
    V := VERSHEAR_REDUCE_PREC_BITS
    (and note that we must have H + V = 2*F for the output to have the same
     scale as the input)

    then we end up with the following offsets and ranges:
    Horizontal filter: Apply an offset of 1 << (bd + F - 1), sum fits into a
                       uint{bd + F + 1}
    After rounding: The values stored in 'tmp' fit into a uint{bd + F + 1 - H}.
    Vertical filter: Apply an offset of 1 << (bd + 2*F - H), sum fits into a
                     uint{bd + 2*F + 2 - H}
    After rounding: The final value, before undoing the offset, fits into a
                    uint{bd + 2}.

    Then we need to undo the offsets before clamping to a pixel. Note that,
    if we do this at the end, the amount to subtract is actually independent
    of H and V:

    offset to subtract = (1 << ((bd + F - 1) - H + F - V)) +
                         (1 << ((bd + 2*F - H) - V))
                      == (1 << (bd - 1)) + (1 << bd)

    This allows us to entirely avoid clamping in both the warp filter and
    the convolve-round experiment. As of the time of writing, the Wiener filter
    from loop-restoration can encode a central coefficient up to 216, which
    leads to a maximum value of about 282 * 2^k after applying the offset.
    So in that case we still need to clamp.
*/
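
/* A quick consistency check of the offsets above (an illustrative sketch):
   with H + V = 2*F,
     (bd + F - 1) - H + F - V  ==  bd - 1 + 2*F - (H + V)  ==  bd - 1
     (bd + 2*F - H) - V        ==  bd + 2*F - (H + V)      ==  bd
   which is why, after the vertical rounding, the code below subtracts
   (1 << (bd - 1)) + (1 << bd) before clamping to a pixel.
*/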
void av1_warp_affine_c(const int32_t *mat, const uint8_t *ref, int width,
                       int height, int stride, uint8_t *pred, int p_col,
                       int p_row, int p_width, int p_height, int p_stride,
                       int subsampling_x, int subsampling_y,
                       ConvolveParams *conv_params, int16_t alpha, int16_t beta,
                       int16_t gamma, int16_t delta) {
  int32_t tmp[15 * 8];
  int i, j, k, l, m;
  const int bd = 8;
#if CONFIG_CONVOLVE_ROUND
  const int use_conv_params = conv_params->round == CONVOLVE_OPT_NO_ROUND;
  const int reduce_bits_horiz =
      use_conv_params ? conv_params->round_0 : HORSHEAR_REDUCE_PREC_BITS;
  const int max_bits_horiz =
      use_conv_params
          ? bd + FILTER_BITS + 1 - conv_params->round_0
          : bd + WARPEDPIXEL_FILTER_BITS + 1 - HORSHEAR_REDUCE_PREC_BITS;
  const int offset_bits_horiz =
      use_conv_params ? bd + FILTER_BITS - 1 : bd + WARPEDPIXEL_FILTER_BITS - 1;
  const int offset_bits_vert =
      use_conv_params
          ? bd + 2 * FILTER_BITS - conv_params->round_0
          : bd + 2 * WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS;
  if (use_conv_params) {
    conv_params->do_post_rounding = 1;
  }
  assert(FILTER_BITS == WARPEDPIXEL_FILTER_BITS);
#else
  const int reduce_bits_horiz = HORSHEAR_REDUCE_PREC_BITS;
  const int max_bits_horiz =
      bd + WARPEDPIXEL_FILTER_BITS + 1 - HORSHEAR_REDUCE_PREC_BITS;
  const int offset_bits_horiz = bd + WARPEDPIXEL_FILTER_BITS - 1;
  const int offset_bits_vert =
      bd + 2 * WARPEDPIXEL_FILTER_BITS - HORSHEAR_REDUCE_PREC_BITS;
#endif
  (void)max_bits_horiz;

  for (i = p_row; i < p_row + p_height; i += 8) {
    for (j = p_col; j < p_col + p_width; j += 8) {
      // Calculate the center of this 8x8 block,
      // project to luma coordinates (if in a subsampled chroma plane),
      // apply the affine transformation,
      // then convert back to the original coordinates (if necessary)
      const int32_t src_x = (j + 4) << subsampling_x;
      const int32_t src_y = (i + 4) << subsampling_y;
      const int32_t dst_x = mat[2] * src_x + mat[3] * src_y + mat[0];
      const int32_t dst_y = mat[4] * src_x + mat[5] * src_y + mat[1];
      const int32_t x4 = dst_x >> subsampling_x;
      const int32_t y4 = dst_y >> subsampling_y;

      int32_t ix4 = x4 >> WARPEDMODEL_PREC_BITS;
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
      int32_t iy4 = y4 >> WARPEDMODEL_PREC_BITS;
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);

      sx4 += alpha * (-4) + beta * (-4);
      sy4 += gamma * (-4) + delta * (-4);

      sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
      sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);

      // Horizontal filter
      for (k = -7; k < 8; ++k) {
        // Clamp to top/bottom edge of the frame
        int iy = iy4 + k;
        if (iy < 0)
          iy = 0;
        else if (iy > height - 1)
          iy = height - 1;

        int sx = sx4 + beta * (k + 4);

        for (l = -4; l < 4; ++l) {
          int ix = ix4 + l - 3;
          // At this point, sx = sx4 + alpha * l + beta * k
          const int offs = ROUND_POWER_OF_TWO(sx, WARPEDDIFF_PREC_BITS) +
                           WARPEDPIXEL_PREC_SHIFTS;
          assert(offs >= 0 && offs <= WARPEDPIXEL_PREC_SHIFTS * 3);
          const int16_t *coeffs = warped_filter[offs];

          int32_t sum = 1 << offset_bits_horiz;
          for (m = 0; m < 8; ++m) {
            // Clamp to left/right edge of the frame
            int sample_x = ix + m;
            if (sample_x < 0)
              sample_x = 0;
            else if (sample_x > width - 1)
              sample_x = width - 1;

            sum += ref[iy * stride + sample_x] * coeffs[m];
          }
          sum = ROUND_POWER_OF_TWO(sum, reduce_bits_horiz);
          assert(0 <= sum && sum < (1 << max_bits_horiz));
          tmp[(k + 7) * 8 + (l + 4)] = sum;
          sx += alpha;
        }
      }

      // Vertical filter
      for (k = -4; k < AOMMIN(4, p_row + p_height - i - 4); ++k) {
        int sy = sy4 + delta * (k + 4);
        for (l = -4; l < AOMMIN(4, p_col + p_width - j - 4); ++l) {
          // At this point, sy = sy4 + gamma * l + delta * k
          const int offs = ROUND_POWER_OF_TWO(sy, WARPEDDIFF_PREC_BITS) +
                           WARPEDPIXEL_PREC_SHIFTS;
          assert(offs >= 0 && offs <= WARPEDPIXEL_PREC_SHIFTS * 3);
          const int16_t *coeffs = warped_filter[offs];

          int32_t sum = 1 << offset_bits_vert;
          for (m = 0; m < 8; ++m) {
            sum += tmp[(k + m + 4) * 8 + (l + 4)] * coeffs[m];
          }
#if CONFIG_CONVOLVE_ROUND
          if (use_conv_params) {
            CONV_BUF_TYPE *p =
                &conv_params
                     ->dst[(i - p_row + k + 4) * conv_params->dst_stride +
                           (j - p_col + l + 4)];
            sum = ROUND_POWER_OF_TWO(sum, conv_params->round_1) -
                  (1 << (offset_bits_horiz + FILTER_BITS -
                         conv_params->round_0 - conv_params->round_1)) -
                  (1 << (offset_bits_vert - conv_params->round_1));
#if CONFIG_JNT_COMP
            if (conv_params->use_jnt_comp_avg) {
              if (conv_params->do_average) {
                *p += sum * conv_params->bck_offset;
                *p = ROUND_POWER_OF_TWO(*p, DIST_PRECISION_BITS - 1);
              } else {
                *p = sum * conv_params->fwd_offset;
              }
            } else {
              if (conv_params->do_average)
                *p += sum;
              else
                *p = sum;
            }
#else
            if (conv_params->do_average)
              *p += sum;
            else
              *p = sum;
#endif  // CONFIG_JNT_COMP
          } else {
#else
          {
#endif
            uint8_t *p =
                &pred[(i - p_row + k + 4) * p_stride + (j - p_col + l + 4)];
            sum = ROUND_POWER_OF_TWO(sum, VERSHEAR_REDUCE_PREC_BITS);
            assert(0 <= sum && sum < (1 << (bd + 2)));
            uint8_t px = clip_pixel(sum - (1 << (bd - 1)) - (1 << bd));
            if (conv_params->do_average)
              *p = ROUND_POWER_OF_TWO(*p + px, 1);
            else
              *p = px;
          }
          sy += gamma;
        }
      }
    }
  }
}

static void warp_plane(WarpedMotionParams *wm, const uint8_t *const ref,
                       int width, int height, int stride, uint8_t *pred,
                       int p_col, int p_row, int p_width, int p_height,
                       int p_stride, int subsampling_x, int subsampling_y,
                       ConvolveParams *conv_params) {
  assert(wm->wmtype <= AFFINE);
  if (wm->wmtype == ROTZOOM) {
    wm->wmmat[5] = wm->wmmat[2];
    wm->wmmat[4] = -wm->wmmat[3];
  }
  const int32_t *const mat = wm->wmmat;
  const int16_t alpha = wm->alpha;
  const int16_t beta = wm->beta;
  const int16_t gamma = wm->gamma;
  const int16_t delta = wm->delta;

  av1_warp_affine(mat, ref, width, height, stride, pred, p_col, p_row, p_width,
                  p_height, p_stride, subsampling_x, subsampling_y, conv_params,
                  alpha, beta, gamma, delta);
}

static int64_t frame_error(const uint8_t *const ref, int stride,
                           const uint8_t *const dst, int p_width, int p_height,
                           int p_stride) {
  int64_t sum_error = 0;
  for (int i = 0; i < p_height; ++i) {
    for (int j = 0; j < p_width; ++j) {
      sum_error +=
          (int64_t)error_measure(dst[j + i * p_stride] - ref[j + i * stride]);
    }
  }
  return sum_error;
}

static int64_t warp_error(WarpedMotionParams *wm, const uint8_t *const ref,
                          int width, int height, int stride,
                          const uint8_t *const dst, int p_col, int p_row,
                          int p_width, int p_height, int p_stride,
                          int subsampling_x, int subsampling_y,
                          int64_t best_error) {
  int64_t gm_sumerr = 0;
  int warp_w, warp_h;
  int error_bsize_w = AOMMIN(p_width, WARP_ERROR_BLOCK);
  int error_bsize_h = AOMMIN(p_height, WARP_ERROR_BLOCK);
  uint8_t tmp[WARP_ERROR_BLOCK * WARP_ERROR_BLOCK];
  ConvolveParams conv_params = get_conv_params(0, 0, 0);
#if CONFIG_JNT_COMP
  conv_params.use_jnt_comp_avg = 0;
#endif

  for (int i = p_row; i < p_row + p_height; i += WARP_ERROR_BLOCK) {
    for (int j = p_col; j < p_col + p_width; j += WARP_ERROR_BLOCK) {
      // avoid warping extra 8x8 blocks in the padded region of the frame
      // when p_width and p_height are not multiples of WARP_ERROR_BLOCK
      warp_w = AOMMIN(error_bsize_w, p_col + p_width - j);
      warp_h = AOMMIN(error_bsize_h, p_row + p_height - i);
      warp_plane(wm, ref, width, height, stride, tmp, j, i, warp_w, warp_h,
                 WARP_ERROR_BLOCK, subsampling_x, subsampling_y, &conv_params);

      gm_sumerr += frame_error(tmp, WARP_ERROR_BLOCK, dst + j + i * p_stride,
                               warp_w, warp_h, p_stride);
      if (gm_sumerr > best_error) return gm_sumerr;
    }
  }
  return gm_sumerr;
}

int64_t av1_frame_error(
#if CONFIG_HIGHBITDEPTH
    int use_hbd, int bd,
#endif  // CONFIG_HIGHBITDEPTH
    const uint8_t *ref, int stride, uint8_t *dst, int p_width, int p_height,
    int p_stride) {
#if CONFIG_HIGHBITDEPTH
  if (use_hbd) {
    return highbd_frame_error(CONVERT_TO_SHORTPTR(ref), stride,
                              CONVERT_TO_SHORTPTR(dst), p_width, p_height,
                              p_stride, bd);
  }
#endif  // CONFIG_HIGHBITDEPTH
  return frame_error(ref, stride, dst, p_width, p_height, p_stride);
}

int64_t av1_warp_error(WarpedMotionParams *wm,
#if CONFIG_HIGHBITDEPTH
                       int use_hbd, int bd,
#endif  // CONFIG_HIGHBITDEPTH
                       const uint8_t *ref, int width, int height, int stride,
                       uint8_t *dst, int p_col, int p_row, int p_width,
                       int p_height, int p_stride, int subsampling_x,
                       int subsampling_y, int64_t best_error) {
  if (wm->wmtype <= AFFINE)
    if (!get_shear_params(wm)) return 1;
#if CONFIG_HIGHBITDEPTH
  if (use_hbd)
    return highbd_warp_error(wm, ref, width, height, stride, dst, p_col, p_row,
                             p_width, p_height, p_stride, subsampling_x,
                             subsampling_y, bd, best_error);
#endif  // CONFIG_HIGHBITDEPTH
  return warp_error(wm, ref, width, height, stride, dst, p_col, p_row, p_width,
                    p_height, p_stride, subsampling_x, subsampling_y,
                    best_error);
}

void av1_warp_plane(WarpedMotionParams *wm,
#if CONFIG_HIGHBITDEPTH
                    int use_hbd, int bd,
#endif  // CONFIG_HIGHBITDEPTH
                    const uint8_t *ref, int width, int height, int stride,
                    uint8_t *pred, int p_col, int p_row, int p_width,