/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>

#include "./aom_dsp_rtcd.h"
#include "./av1_rtcd.h"

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/blend.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/system_state.h"

#if CONFIG_CFL
#include "av1/common/cfl.h"
#endif
#include "av1/common/common.h"
#include "av1/common/common_data.h"
#include "av1/common/entropy.h"
#include "av1/common/entropymode.h"
#include "av1/common/idct.h"
#include "av1/common/mvref_common.h"
#include "av1/common/obmc.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
#include "av1/common/reconintra.h"
#include "av1/common/scan.h"
#include "av1/common/seg_common.h"
#if CONFIG_LV_MAP
#include "av1/common/txb_common.h"
#endif
#include "av1/common/warped_motion.h"

#include "av1/encoder/aq_variance.h"
#include "av1/encoder/av1_quantize.h"
#include "av1/encoder/cost.h"
#include "av1/encoder/encodemb.h"
#include "av1/encoder/encodemv.h"
#include "av1/encoder/encoder.h"
#if CONFIG_LV_MAP
#include "av1/encoder/encodetxb.h"
#endif
#include "av1/encoder/hybrid_fwd_txfm.h"
#include "av1/encoder/mcomp.h"
#include "av1/encoder/palette.h"
#include "av1/encoder/ratectrl.h"
#include "av1/encoder/rd.h"
#include "av1/encoder/rdopt.h"
#include "av1/encoder/tokenize.h"
#include "av1/encoder/tx_prune_model_weights.h"

#if CONFIG_DUAL_FILTER
#define DUAL_FILTER_SET_SIZE (SWITCHABLE_FILTERS * SWITCHABLE_FILTERS)
#if USE_EXTRA_FILTER
static const int filter_sets[DUAL_FILTER_SET_SIZE][2] = {
  { 0, 0 }, { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 0 }, { 1, 1 },
  { 1, 2 }, { 1, 3 }, { 2, 0 }, { 2, 1 }, { 2, 2 }, { 2, 3 },
  { 3, 0 }, { 3, 1 }, { 3, 2 }, { 3, 3 },
};
#else   // USE_EXTRA_FILTER
static const int filter_sets[DUAL_FILTER_SET_SIZE][2] = {
  { 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 0 }, { 1, 1 },
  { 1, 2 }, { 2, 0 }, { 2, 1 }, { 2, 2 },
};
#endif  // USE_EXTRA_FILTER
#endif  // CONFIG_DUAL_FILTER

#define LAST_FRAME_MODE_MASK                                          \
  ((1 << INTRA_FRAME) | (1 << LAST2_FRAME) | (1 << LAST3_FRAME) |     \
   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF2_FRAME) | \
   (1 << ALTREF_FRAME))
#define LAST2_FRAME_MODE_MASK                                         \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST3_FRAME) |      \
   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF2_FRAME) | \
   (1 << ALTREF_FRAME))
#define LAST3_FRAME_MODE_MASK                                         \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) |      \
   (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF2_FRAME) | \
   (1 << ALTREF_FRAME))
#define GOLDEN_FRAME_MODE_MASK                                       \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) |     \
   (1 << LAST3_FRAME) | (1 << BWDREF_FRAME) | (1 << ALTREF2_FRAME) | \
   (1 << ALTREF_FRAME))
#define BWDREF_FRAME_MODE_MASK                                       \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) |     \
   (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) | (1 << ALTREF2_FRAME) | \
   (1 << ALTREF_FRAME))
#define ALTREF2_FRAME_MODE_MASK                                     \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) |    \
   (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | \
   (1 << ALTREF_FRAME))
#define ALTREF_FRAME_MODE_MASK                                      \
  ((1 << INTRA_FRAME) | (1 << LAST_FRAME) | (1 << LAST2_FRAME) |    \
   (1 << LAST3_FRAME) | (1 << GOLDEN_FRAME) | (1 << BWDREF_FRAME) | \
   (1 << ALTREF2_FRAME))

#if CONFIG_EXT_COMP_REFS
#define SECOND_REF_FRAME_MASK                                         \
  ((1 << ALTREF_FRAME) | (1 << ALTREF2_FRAME) | (1 << BWDREF_FRAME) | \
   (1 << GOLDEN_FRAME) | (1 << LAST2_FRAME) | 0x01)
#else  // !CONFIG_EXT_COMP_REFS
#define SECOND_REF_FRAME_MASK \
  ((1 << ALTREF_FRAME) | (1 << ALTREF2_FRAME) | (1 << BWDREF_FRAME) | 0x01)
#endif  // CONFIG_EXT_COMP_REFS

#define MIN_EARLY_TERM_INDEX 3
#define NEW_MV_DISCOUNT_FACTOR 8

#if CONFIG_EXT_INTRA
#define ANGLE_SKIP_THRESH 10
#define FILTER_FAST_SEARCH 1
#endif  // CONFIG_EXT_INTRA

// Setting this to 1 will disable trellis optimization within the
// transform search. Trellis optimization will still be applied
// in the final encode.
#ifndef DISABLE_TRELLISQ_SEARCH
#define DISABLE_TRELLISQ_SEARCH 0
#endif

static const double ADST_FLIP_SVM[8] = {
  /* vertical */
  -6.6623, -2.8062, -3.2531, 3.1671,
  /* horizontal */
  -7.7051, -3.2234, -3.6193, 3.4533
};

typedef struct {
  PREDICTION_MODE mode;
  MV_REFERENCE_FRAME ref_frame[2];
} MODE_DEFINITION;

typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;

struct rdcost_block_args {
  const AV1_COMP *cpi;
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[2 * MAX_MIB_SIZE];
  ENTROPY_CONTEXT t_left[2 * MAX_MIB_SIZE];
  RD_STATS rd_stats;
  int64_t this_rd;
  int64_t best_rd;
  int exit_early;
  int use_fast_coef_costing;
};

#define LAST_NEW_MV_INDEX 6
static const MODE_DEFINITION av1_mode_order[MAX_MODES] = {
  { NEARESTMV, { LAST_FRAME, NONE_FRAME } },
  { NEARESTMV, { LAST2_FRAME, NONE_FRAME } },
  { NEARESTMV, { LAST3_FRAME, NONE_FRAME } },
  { NEARESTMV, { BWDREF_FRAME, NONE_FRAME } },
  { NEARESTMV, { ALTREF2_FRAME, NONE_FRAME } },
  { NEARESTMV, { ALTREF_FRAME, NONE_FRAME } },
  { NEARESTMV, { GOLDEN_FRAME, NONE_FRAME } },

  { DC_PRED, { INTRA_FRAME, NONE_FRAME } },

  { NEWMV, { LAST_FRAME, NONE_FRAME } },
  { NEWMV, { LAST2_FRAME, NONE_FRAME } },
  { NEWMV, { LAST3_FRAME, NONE_FRAME } },
  { NEWMV, { BWDREF_FRAME, NONE_FRAME } },
  { NEWMV, { ALTREF2_FRAME, NONE_FRAME } },
  { NEWMV, { ALTREF_FRAME, NONE_FRAME } },
  { NEWMV, { GOLDEN_FRAME, NONE_FRAME } },

  { NEARMV, { LAST_FRAME, NONE_FRAME } },
  { NEARMV, { LAST2_FRAME, NONE_FRAME } },
  { NEARMV, { LAST3_FRAME, NONE_FRAME } },
  { NEARMV, { BWDREF_FRAME, NONE_FRAME } },
  { NEARMV, { ALTREF2_FRAME, NONE_FRAME } },
  { NEARMV, { ALTREF_FRAME, NONE_FRAME } },
  { NEARMV, { GOLDEN_FRAME, NONE_FRAME } },

  { GLOBALMV, { LAST_FRAME, NONE_FRAME } },
  { GLOBALMV, { LAST2_FRAME, NONE_FRAME } },
  { GLOBALMV, { LAST3_FRAME, NONE_FRAME } },
  { GLOBALMV, { BWDREF_FRAME, NONE_FRAME } },
  { GLOBALMV, { ALTREF2_FRAME, NONE_FRAME } },
  { GLOBALMV, { GOLDEN_FRAME, NONE_FRAME } },
  { GLOBALMV, { ALTREF_FRAME, NONE_FRAME } },

// TODO(zoeliu): May need to reconsider the order on the modes to check

#if CONFIG_COMPOUND_SINGLEREF
  // Single ref comp mode
  { SR_NEAREST_NEARMV, { LAST_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEARMV, { LAST2_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEARMV, { LAST3_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEARMV, { BWDREF_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEARMV, { GOLDEN_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEARMV, { ALTREF_FRAME, NONE_FRAME } },

  /*
  { SR_NEAREST_NEWMV, { LAST_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEWMV, { LAST2_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEWMV, { LAST3_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEWMV, { BWDREF_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEWMV, { GOLDEN_FRAME, NONE_FRAME } },
  { SR_NEAREST_NEWMV, { ALTREF_FRAME, NONE_FRAME } },*/

  { SR_NEAR_NEWMV, { LAST_FRAME, NONE_FRAME } },
  { SR_NEAR_NEWMV, { LAST2_FRAME, NONE_FRAME } },
  { SR_NEAR_NEWMV, { LAST3_FRAME, NONE_FRAME } },
  { SR_NEAR_NEWMV, { BWDREF_FRAME, NONE_FRAME } },
  { SR_NEAR_NEWMV, { GOLDEN_FRAME, NONE_FRAME } },
  { SR_NEAR_NEWMV, { ALTREF_FRAME, NONE_FRAME } },

  { SR_ZERO_NEWMV, { LAST_FRAME, NONE_FRAME } },
  { SR_ZERO_NEWMV, { LAST2_FRAME, NONE_FRAME } },
  { SR_ZERO_NEWMV, { LAST3_FRAME, NONE_FRAME } },
  { SR_ZERO_NEWMV, { BWDREF_FRAME, NONE_FRAME } },
  { SR_ZERO_NEWMV, { GOLDEN_FRAME, NONE_FRAME } },
  { SR_ZERO_NEWMV, { ALTREF_FRAME, NONE_FRAME } },

  { SR_NEW_NEWMV, { LAST_FRAME, NONE_FRAME } },
  { SR_NEW_NEWMV, { LAST2_FRAME, NONE_FRAME } },
  { SR_NEW_NEWMV, { LAST3_FRAME, NONE_FRAME } },
  { SR_NEW_NEWMV, { BWDREF_FRAME, NONE_FRAME } },
  { SR_NEW_NEWMV, { GOLDEN_FRAME, NONE_FRAME } },
  { SR_NEW_NEWMV, { ALTREF_FRAME, NONE_FRAME } },
#endif  // CONFIG_COMPOUND_SINGLEREF

  { NEAREST_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEAREST_NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEAREST_NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEAREST_NEARESTMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEARESTMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEARESTMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEARESTMV, { GOLDEN_FRAME, ALTREF2_FRAME } },

#if CONFIG_EXT_COMP_REFS
  { NEAREST_NEARESTMV, { LAST_FRAME, LAST2_FRAME } },
  { NEAREST_NEARESTMV, { LAST_FRAME, LAST3_FRAME } },
  { NEAREST_NEARESTMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEAREST_NEARESTMV, { BWDREF_FRAME, ALTREF_FRAME } },
#endif  // CONFIG_EXT_COMP_REFS

  { PAETH_PRED, { INTRA_FRAME, NONE_FRAME } },

  { SMOOTH_PRED, { INTRA_FRAME, NONE_FRAME } },
#if CONFIG_SMOOTH_HV
  { SMOOTH_V_PRED, { INTRA_FRAME, NONE_FRAME } },
  { SMOOTH_H_PRED, { INTRA_FRAME, NONE_FRAME } },
#endif  // CONFIG_SMOOTH_HV

  { NEAR_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, ALTREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, ALTREF_FRAME } },

  { NEAR_NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEW_NEARESTMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEAREST_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEW_NEARMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEAR_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
  { NEW_NEWMV, { LAST2_FRAME, ALTREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST2_FRAME, ALTREF_FRAME } },

  { NEAR_NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEW_NEARESTMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEAREST_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEW_NEARMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEAR_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
  { NEW_NEWMV, { LAST3_FRAME, ALTREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST3_FRAME, ALTREF_FRAME } },

  { NEAR_NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEW_NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEAREST_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEW_NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEAR_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { NEW_NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
  { GLOBAL_GLOBALMV, { GOLDEN_FRAME, ALTREF_FRAME } },

  { NEAR_NEARMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, BWDREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, BWDREF_FRAME } },

  { NEAR_NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEW_NEARESTMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEAREST_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEW_NEARMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEAR_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
  { NEW_NEWMV, { LAST2_FRAME, BWDREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST2_FRAME, BWDREF_FRAME } },

  { NEAR_NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEW_NEARESTMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEAREST_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEW_NEARMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEAR_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
  { NEW_NEWMV, { LAST3_FRAME, BWDREF_FRAME } },
  { GLOBAL_GLOBALMV, { LAST3_FRAME, BWDREF_FRAME } },

  { NEAR_NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEW_NEARESTMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEAREST_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEW_NEARMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEAR_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { NEW_NEWMV, { GOLDEN_FRAME, BWDREF_FRAME } },
  { GLOBAL_GLOBALMV, { GOLDEN_FRAME, BWDREF_FRAME } },

  { NEAR_NEARMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, ALTREF2_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, ALTREF2_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, ALTREF2_FRAME } },

  { NEAR_NEARMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEW_NEARESTMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEWMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEW_NEARMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEAR_NEWMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { NEW_NEWMV, { LAST2_FRAME, ALTREF2_FRAME } },
  { GLOBAL_GLOBALMV, { LAST2_FRAME, ALTREF2_FRAME } },

  { NEAR_NEARMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEW_NEARESTMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEWMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEW_NEARMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEAR_NEWMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { NEW_NEWMV, { LAST3_FRAME, ALTREF2_FRAME } },
  { GLOBAL_GLOBALMV, { LAST3_FRAME, ALTREF2_FRAME } },

  { NEAR_NEARMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { NEW_NEARESTMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { NEAREST_NEWMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { NEW_NEARMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { NEAR_NEWMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { NEW_NEWMV, { GOLDEN_FRAME, ALTREF2_FRAME } },
  { GLOBAL_GLOBALMV, { GOLDEN_FRAME, ALTREF2_FRAME } },

#if CONFIG_EXT_COMP_REFS
  { NEAR_NEARMV, { LAST_FRAME, LAST2_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, LAST2_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, LAST2_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, LAST2_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, LAST2_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, LAST2_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, LAST2_FRAME } },

  { NEAR_NEARMV, { LAST_FRAME, LAST3_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, LAST3_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, LAST3_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, LAST3_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, LAST3_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, LAST3_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, LAST3_FRAME } },

  { NEAR_NEARMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEW_NEARESTMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEAREST_NEWMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEW_NEARMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEAR_NEWMV, { LAST_FRAME, GOLDEN_FRAME } },
  { NEW_NEWMV, { LAST_FRAME, GOLDEN_FRAME } },
  { GLOBAL_GLOBALMV, { LAST_FRAME, GOLDEN_FRAME } },

  { NEAR_NEARMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { NEW_NEARESTMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { NEAREST_NEWMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { NEW_NEARMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { NEAR_NEWMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { NEW_NEWMV, { BWDREF_FRAME, ALTREF_FRAME } },
  { GLOBAL_GLOBALMV, { BWDREF_FRAME, ALTREF_FRAME } },
#endif  // CONFIG_EXT_COMP_REFS

  { H_PRED, { INTRA_FRAME, NONE_FRAME } },
  { V_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D135_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D207_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D153_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D63_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D117_PRED, { INTRA_FRAME, NONE_FRAME } },
  { D45_PRED, { INTRA_FRAME, NONE_FRAME } },

  { GLOBALMV, { LAST_FRAME, INTRA_FRAME } },
  { NEARESTMV, { LAST_FRAME, INTRA_FRAME } },
  { NEARMV, { LAST_FRAME, INTRA_FRAME } },
  { NEWMV, { LAST_FRAME, INTRA_FRAME } },

  { GLOBALMV, { LAST2_FRAME, INTRA_FRAME } },
  { NEARESTMV, { LAST2_FRAME, INTRA_FRAME } },
  { NEARMV, { LAST2_FRAME, INTRA_FRAME } },
  { NEWMV, { LAST2_FRAME, INTRA_FRAME } },

  { GLOBALMV, { LAST3_FRAME, INTRA_FRAME } },
  { NEARESTMV, { LAST3_FRAME, INTRA_FRAME } },
  { NEARMV, { LAST3_FRAME, INTRA_FRAME } },
  { NEWMV, { LAST3_FRAME, INTRA_FRAME } },

  { GLOBALMV, { GOLDEN_FRAME, INTRA_FRAME } },
  { NEARESTMV, { GOLDEN_FRAME, INTRA_FRAME } },
  { NEARMV, { GOLDEN_FRAME, INTRA_FRAME } },
  { NEWMV, { GOLDEN_FRAME, INTRA_FRAME } },

  { GLOBALMV, { BWDREF_FRAME, INTRA_FRAME } },
  { NEARESTMV, { BWDREF_FRAME, INTRA_FRAME } },
  { NEARMV, { BWDREF_FRAME, INTRA_FRAME } },
  { NEWMV, { BWDREF_FRAME, INTRA_FRAME } },

  { GLOBALMV, { ALTREF2_FRAME, INTRA_FRAME } },
  { NEARESTMV, { ALTREF2_FRAME, INTRA_FRAME } },
  { NEARMV, { ALTREF2_FRAME, INTRA_FRAME } },
  { NEWMV, { ALTREF2_FRAME, INTRA_FRAME } },

  { GLOBALMV, { ALTREF_FRAME, INTRA_FRAME } },
  { NEARESTMV, { ALTREF_FRAME, INTRA_FRAME } },
  { NEARMV, { ALTREF_FRAME, INTRA_FRAME } },
  { NEWMV, { ALTREF_FRAME, INTRA_FRAME } },
};

static const PREDICTION_MODE intra_rd_search_mode_order[INTRA_MODES] = {
  DC_PRED,       H_PRED,        V_PRED,    SMOOTH_PRED, PAETH_PRED,
#if CONFIG_SMOOTH_HV
  SMOOTH_V_PRED, SMOOTH_H_PRED,
#endif  // CONFIG_SMOOTH_HV
  D135_PRED,     D207_PRED,     D153_PRED, D63_PRED,    D117_PRED,  D45_PRED,
};

#if CONFIG_CFL
static const UV_PREDICTION_MODE uv_rd_search_mode_order[UV_INTRA_MODES] = {
  UV_DC_PRED,       UV_CFL_PRED,      UV_H_PRED,
  UV_V_PRED,        UV_SMOOTH_PRED,   UV_PAETH_PRED,
#if CONFIG_SMOOTH_HV
  UV_SMOOTH_V_PRED, UV_SMOOTH_H_PRED,
#endif  // CONFIG_SMOOTH_HV
  UV_D135_PRED,     UV_D207_PRED,     UV_D153_PRED,
  UV_D63_PRED,      UV_D117_PRED,     UV_D45_PRED,
};
#else
#define uv_rd_search_mode_order intra_rd_search_mode_order
#endif  // CONFIG_CFL

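// Rate cost, in the same units as av1_cost_bit(), of signaling value v with a
// truncated binary code over n symbols: the first (1 << l) - n values use
// l - 1 bits, the remaining values use l bits. E.g. n = 3 gives l = 2, so
// value 0 costs one bit and values 1..2 cost two bits.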
static INLINE int write_uniform_cost(int n, int v) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  if (l == 0) return 0;
  if (v < m)
    return (l - 1) * av1_cost_bit(128, 0);
  else
    return l * av1_cost_bit(128, 0);
}

// constants for prune 1 and prune 2 decision boundaries
#define FAST_EXT_TX_CORR_MID 0.0
#define FAST_EXT_TX_EDST_MID 0.1
#define FAST_EXT_TX_CORR_MARGIN 0.5
#define FAST_EXT_TX_EDST_MARGIN 0.3

int inter_block_yrd(const AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *rd_stats,
                    BLOCK_SIZE bsize, int64_t ref_best_rd, int fast);
int inter_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, RD_STATS *rd_stats,
                     BLOCK_SIZE bsize, int64_t ref_best_rd, int fast);

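// Sum of squared error between src and dst over a transform block, restricted
// to the visible_rows x visible_cols region when the block extends beyond the
// visible edge of the frame.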
static unsigned pixel_dist_visible_only(
    const AV1_COMP *const cpi, const MACROBLOCK *x, const uint8_t *src,
    const int src_stride, const uint8_t *dst, const int dst_stride,
    const BLOCK_SIZE tx_bsize, int txb_rows, int txb_cols, int visible_rows,
    int visible_cols) {
  unsigned sse;

  if (txb_rows == visible_rows && txb_cols == visible_cols
#if CONFIG_RECT_TX_EXT
      && tx_bsize < BLOCK_SIZES
#endif
      ) {
    cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
    return sse;
  }
#if CONFIG_HIGHBITDEPTH
  const MACROBLOCKD *xd = &x->e_mbd;

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    uint64_t sse64 = aom_highbd_sse_odd_size(src, src_stride, dst, dst_stride,
                                             visible_cols, visible_rows);
    return (unsigned int)ROUND_POWER_OF_TWO(sse64, (xd->bd - 8) * 2);
  }
#else
  (void)x;
#endif  // CONFIG_HIGHBITDEPTH
  sse = aom_sse_odd_size(src, src_stride, dst, dst_stride, visible_cols,
                         visible_rows);
  return sse;
}

#if CONFIG_DIST_8X8
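// Distortion of an 8x8 block of 16-bit pixels computed with the
// variance-based metric derived from the CDEF tool's dering distortion
// (see the tuning comment inside).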
static uint64_t cdef_dist_8x8_16bit(uint16_t *dst, int dstride, uint16_t *src,
                                    int sstride, int coeff_shift) {
  uint64_t svar = 0;
  uint64_t dvar = 0;
  uint64_t sum_s = 0;
  uint64_t sum_d = 0;
  uint64_t sum_s2 = 0;
  uint64_t sum_d2 = 0;
  uint64_t sum_sd = 0;
  uint64_t dist = 0;

  int i, j;
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 8; j++) {
      sum_s += src[i * sstride + j];
      sum_d += dst[i * dstride + j];
      sum_s2 += src[i * sstride + j] * src[i * sstride + j];
      sum_d2 += dst[i * dstride + j] * dst[i * dstride + j];
      sum_sd += src[i * sstride + j] * dst[i * dstride + j];
    }
  }
  /* Compute the variance -- the calculation cannot go negative. */
  svar = sum_s2 - ((sum_s * sum_s + 32) >> 6);
  dvar = sum_d2 - ((sum_d * sum_d + 32) >> 6);

  // Tuning of jm's original dering distortion metric used in CDEF tool,
  // suggested by jm
  const uint64_t a = 4;
  const uint64_t b = 2;
  const uint64_t c1 = (400 * a << 2 * coeff_shift);
  const uint64_t c2 = (b * 20000 * a * a << 4 * coeff_shift);

  dist =
      (uint64_t)floor(.5 +
                      (sum_d2 + sum_s2 - 2 * sum_sd) * .5 * (svar + dvar + c1) /
                          (sqrt(svar * (double)dvar + c2)));

  // Calibrate dist to have similar rate for the same QP with MSE only
  // distortion (as in master branch)
  dist = (uint64_t)((float)dist * 0.75);

  return dist;
}

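// Variance of a 4x4 block of 16-bit pixels.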
static int od_compute_var_4x4(uint16_t *x, int stride) {
  int sum;
  int s2;
  int i;
  sum = 0;
  s2 = 0;
  for (i = 0; i < 4; i++) {
    int j;
    for (j = 0; j < 4; j++) {
      int t;

      t = x[i * stride + j];
      sum += t;
      s2 += t * t;
    }
  }

  return (s2 - (sum * sum >> 4)) >> 4;
}

/* OD_DIST_LP_MID controls the frequency weighting filter used for computing
   the distortion. For a value X, the filter is [1 X 1]/(X + 2) and
   is applied both horizontally and vertically. For X=5, the filter is
   a good approximation for the OD_QM8_Q4_HVS quantization matrix. */
#define OD_DIST_LP_MID (5)
#define OD_DIST_LP_NORM (OD_DIST_LP_MID + 2)

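// Daala-style distortion for one 8x8 block: combines the low-pass filtered
// error energy in e_lp with a variance-mismatch term, weighted by an activity
// factor derived from the local 4x4 variances.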
static double od_compute_dist_8x8(int use_activity_masking, uint16_t *x,
                                  uint16_t *y, od_coeff *e_lp, int stride) {
  double sum;
  int min_var;
  double mean_var;
  double var_stat;
  double activity;
  double calibration;
  int i;
  int j;
  double vardist;

  vardist = 0;

#if 1
  min_var = INT_MAX;
  mean_var = 0;
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      int varx;
      int vary;
      varx = od_compute_var_4x4(x + 2 * i * stride + 2 * j, stride);
      vary = od_compute_var_4x4(y + 2 * i * stride + 2 * j, stride);
      min_var = OD_MINI(min_var, varx);
      mean_var += 1. / (1 + varx);
      /* The cast to (double) is to avoid an overflow before the sqrt.*/
      vardist += varx - 2 * sqrt(varx * (double)vary) + vary;
    }
  }
  /* We use a different variance statistic depending on whether activity
     masking is used, since the harmonic mean appeared slightly worse with
     masking off. The calibration constant just ensures that we preserve the
     rate compared to activity=1. */
  if (use_activity_masking) {
    calibration = 1.95;
    var_stat = 9. / mean_var;
  } else {
    calibration = 1.62;
    var_stat = min_var;
  }
  /* 1.62 is a calibration constant, 0.25 is a noise floor and 1/6 is the
     activity masking constant. */
  activity = calibration * pow(.25 + var_stat, -1. / 6);
#else
  activity = 1;
#endif  // 1
  sum = 0;
  for (i = 0; i < 8; i++) {
    for (j = 0; j < 8; j++)
      sum += e_lp[i * stride + j] * (double)e_lp[i * stride + j];
  }
  /* Normalize the filter to unit DC response. */
  sum *= 1. / (OD_DIST_LP_NORM * OD_DIST_LP_NORM * OD_DIST_LP_NORM *
               OD_DIST_LP_NORM);
  return activity * activity * (sum + vardist);
}

// Note: Inputs x and y are in the pixel domain.
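// Shared tail of od_compute_dist() / od_compute_dist_diff(): applies the
// vertical pass of the [1 MID 1] low-pass filter to the error in tmp,
// accumulates the per-8x8 distortions and applies a qindex-dependent scaling
// fitted against SSE.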
static double od_compute_dist_common(int activity_masking, uint16_t *x,
                                     uint16_t *y, int bsize_w, int bsize_h,
                                     int qindex, od_coeff *tmp,
                                     od_coeff *e_lp) {
  int i, j;
  double sum = 0;
  const int mid = OD_DIST_LP_MID;

  for (j = 0; j < bsize_w; j++) {
    e_lp[j] = mid * tmp[j] + 2 * tmp[bsize_w + j];
    e_lp[(bsize_h - 1) * bsize_w + j] = mid * tmp[(bsize_h - 1) * bsize_w + j] +
                                        2 * tmp[(bsize_h - 2) * bsize_w + j];
  }
  for (i = 1; i < bsize_h - 1; i++) {
    for (j = 0; j < bsize_w; j++) {
      e_lp[i * bsize_w + j] = mid * tmp[i * bsize_w + j] +
                              tmp[(i - 1) * bsize_w + j] +
                              tmp[(i + 1) * bsize_w + j];
    }
  }
  for (i = 0; i < bsize_h; i += 8) {
    for (j = 0; j < bsize_w; j += 8) {
      sum += od_compute_dist_8x8(activity_masking, &x[i * bsize_w + j],
                                 &y[i * bsize_w + j], &e_lp[i * bsize_w + j],
                                 bsize_w);
    }
  }
  /* Scale according to linear regression against SSE, for 8x8 blocks. */
  if (activity_masking) {
    sum *= 2.2 + (1.7 - 2.2) * (qindex - 99) / (210 - 99) +
           (qindex < 99 ? 2.5 * (qindex - 99) / 99 * (qindex - 99) / 99 : 0);
  } else {
    sum *= qindex >= 128
               ? 1.4 + (0.9 - 1.4) * (qindex - 128) / (209 - 128)
               : qindex <= 43 ? 1.5 + (2.0 - 1.5) * (qindex - 43) / (16 - 43)
                              : 1.5 + (1.4 - 1.5) * (qindex - 43) / (128 - 43);
  }

  return sum;
}

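// Daala distortion between source block x and reconstruction y; both blocks
// are bsize_w x bsize_h with the width used as the stride.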
static double od_compute_dist(uint16_t *x, uint16_t *y, int bsize_w,
                              int bsize_h, int qindex) {
  assert(bsize_w >= 8 && bsize_h >= 8);

  int activity_masking = 0;

  int i, j;
  DECLARE_ALIGNED(16, od_coeff, e[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, od_coeff, tmp[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, od_coeff, e_lp[MAX_TX_SQUARE]);
  for (i = 0; i < bsize_h; i++) {
    for (j = 0; j < bsize_w; j++) {
      e[i * bsize_w + j] = x[i * bsize_w + j] - y[i * bsize_w + j];
    }
  }
  int mid = OD_DIST_LP_MID;
  for (i = 0; i < bsize_h; i++) {
    tmp[i * bsize_w] = mid * e[i * bsize_w] + 2 * e[i * bsize_w + 1];
    tmp[i * bsize_w + bsize_w - 1] =
        mid * e[i * bsize_w + bsize_w - 1] + 2 * e[i * bsize_w + bsize_w - 2];
    for (j = 1; j < bsize_w - 1; j++) {
      tmp[i * bsize_w + j] = mid * e[i * bsize_w + j] + e[i * bsize_w + j - 1] +
                             e[i * bsize_w + j + 1];
    }
  }
  return od_compute_dist_common(activity_masking, x, y, bsize_w, bsize_h,
                                qindex, tmp, e_lp);
}

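// Same as od_compute_dist(), but takes the source x and the prediction error
// e (the reconstruction is derived as x - e).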
static double od_compute_dist_diff(uint16_t *x, int16_t *e, int bsize_w,
                                   int bsize_h, int qindex) {
  assert(bsize_w >= 8 && bsize_h >= 8);

  int activity_masking = 0;

  DECLARE_ALIGNED(16, uint16_t, y[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, od_coeff, tmp[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, od_coeff, e_lp[MAX_TX_SQUARE]);
  int i, j;
  for (i = 0; i < bsize_h; i++) {
    for (j = 0; j < bsize_w; j++) {
      y[i * bsize_w + j] = x[i * bsize_w + j] - e[i * bsize_w + j];
    }
  }
  int mid = OD_DIST_LP_MID;
  for (i = 0; i < bsize_h; i++) {
    tmp[i * bsize_w] = mid * e[i * bsize_w] + 2 * e[i * bsize_w + 1];
    tmp[i * bsize_w + bsize_w - 1] =
        mid * e[i * bsize_w + bsize_w - 1] + 2 * e[i * bsize_w + bsize_w - 2];
    for (j = 1; j < bsize_w - 1; j++) {
      tmp[i * bsize_w + j] = mid * e[i * bsize_w + j] + e[i * bsize_w + j - 1] +
                             e[i * bsize_w + j + 1];
    }
  }
  return od_compute_dist_common(activity_masking, x, y, bsize_w, bsize_h,
                                qindex, tmp, e_lp);
}

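// Distortion of a block when CONFIG_DIST_8X8 is enabled: Daala distortion,
// CDEF distortion or plain MSE depending on x->tune_metric. Pixels outside
// the visible region are copied from the source so they contribute no error.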
int64_t av1_dist_8x8(const AV1_COMP *const cpi, const MACROBLOCK *x,
                     const uint8_t *src, int src_stride, const uint8_t *dst,
                     int dst_stride, const BLOCK_SIZE tx_bsize, int bsw,
                     int bsh, int visible_w, int visible_h, int qindex) {
  int64_t d = 0;
  int i, j;
  const MACROBLOCKD *xd = &x->e_mbd;

  DECLARE_ALIGNED(16, uint16_t, orig[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, uint16_t, rec[MAX_TX_SQUARE]);

  assert(bsw >= 8);
  assert(bsh >= 8);
  assert((bsw & 0x07) == 0);
  assert((bsh & 0x07) == 0);

  if (x->tune_metric == AOM_TUNE_CDEF_DIST ||
      x->tune_metric == AOM_TUNE_DAALA_DIST) {
#if CONFIG_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      for (j = 0; j < bsh; j++)
        for (i = 0; i < bsw; i++)
          orig[j * bsw + i] = CONVERT_TO_SHORTPTR(src)[j * src_stride + i];

      if ((bsw == visible_w) && (bsh == visible_h)) {
        for (j = 0; j < bsh; j++)
          for (i = 0; i < bsw; i++)
            rec[j * bsw + i] = CONVERT_TO_SHORTPTR(dst)[j * dst_stride + i];
      } else {
        for (j = 0; j < visible_h; j++)
          for (i = 0; i < visible_w; i++)
            rec[j * bsw + i] = CONVERT_TO_SHORTPTR(dst)[j * dst_stride + i];

        if (visible_w < bsw) {
          for (j = 0; j < bsh; j++)
            for (i = visible_w; i < bsw; i++)
              rec[j * bsw + i] = CONVERT_TO_SHORTPTR(src)[j * src_stride + i];
        }

        if (visible_h < bsh) {
          for (j = visible_h; j < bsh; j++)
            for (i = 0; i < bsw; i++)
              rec[j * bsw + i] = CONVERT_TO_SHORTPTR(src)[j * src_stride + i];
        }
      }
    } else {
#endif
      for (j = 0; j < bsh; j++)
        for (i = 0; i < bsw; i++) orig[j * bsw + i] = src[j * src_stride + i];

      if ((bsw == visible_w) && (bsh == visible_h)) {
        for (j = 0; j < bsh; j++)
          for (i = 0; i < bsw; i++) rec[j * bsw + i] = dst[j * dst_stride + i];
      } else {
        for (j = 0; j < visible_h; j++)
          for (i = 0; i < visible_w; i++)
            rec[j * bsw + i] = dst[j * dst_stride + i];

        if (visible_w < bsw) {
          for (j = 0; j < bsh; j++)
            for (i = visible_w; i < bsw; i++)
              rec[j * bsw + i] = src[j * src_stride + i];
        }

        if (visible_h < bsh) {
          for (j = visible_h; j < bsh; j++)
            for (i = 0; i < bsw; i++)
              rec[j * bsw + i] = src[j * src_stride + i];
        }
      }
#if CONFIG_HIGHBITDEPTH
    }
#endif  // CONFIG_HIGHBITDEPTH
  }

  if (x->tune_metric == AOM_TUNE_DAALA_DIST) {
    d = (int64_t)od_compute_dist(orig, rec, bsw, bsh, qindex);
  } else if (x->tune_metric == AOM_TUNE_CDEF_DIST) {
    int coeff_shift = AOMMAX(xd->bd - 8, 0);

    for (i = 0; i < bsh; i += 8) {
      for (j = 0; j < bsw; j += 8) {
        d += cdef_dist_8x8_16bit(&rec[i * bsw + j], bsw, &orig[i * bsw + j],
                                 bsw, coeff_shift);
      }
    }
#if CONFIG_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
      d = ((uint64_t)d) >> 2 * coeff_shift;
#endif
  } else {
    // Otherwise, MSE by default
    d = pixel_dist_visible_only(cpi, x, src, src_stride, dst, dst_stride,
                                tx_bsize, bsh, bsw, visible_h, visible_w);
  }

  return d;
}

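// Residual-domain counterpart of av1_dist_8x8(): takes the prediction
// residual (diff) instead of the reconstructed pixels; the residual outside
// the visible region is zeroed.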
static int64_t av1_dist_8x8_diff(const MACROBLOCK *x, const uint8_t *src,
                                 int src_stride, const int16_t *diff,
                                 int diff_stride, int bsw, int bsh,
                                 int visible_w, int visible_h, int qindex) {
  int64_t d = 0;
  int i, j;
  const MACROBLOCKD *xd = &x->e_mbd;

  DECLARE_ALIGNED(16, uint16_t, orig[MAX_TX_SQUARE]);
  DECLARE_ALIGNED(16, int16_t, diff16[MAX_TX_SQUARE]);

  assert(bsw >= 8);
  assert(bsh >= 8);
  assert((bsw & 0x07) == 0);
  assert((bsh & 0x07) == 0);

  if (x->tune_metric == AOM_TUNE_CDEF_DIST ||
      x->tune_metric == AOM_TUNE_DAALA_DIST) {
#if CONFIG_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      for (j = 0; j < bsh; j++)
        for (i = 0; i < bsw; i++)
          orig[j * bsw + i] = CONVERT_TO_SHORTPTR(src)[j * src_stride + i];
    } else {
#endif
      for (j = 0; j < bsh; j++)
        for (i = 0; i < bsw; i++) orig[j * bsw + i] = src[j * src_stride + i];
#if CONFIG_HIGHBITDEPTH
    }
#endif  // CONFIG_HIGHBITDEPTH

    if ((bsw == visible_w) && (bsh == visible_h)) {
      for (j = 0; j < bsh; j++)
        for (i = 0; i < bsw; i++)
          diff16[j * bsw + i] = diff[j * diff_stride + i];
    } else {
      for (j = 0; j < visible_h; j++)
        for (i = 0; i < visible_w; i++)
          diff16[j * bsw + i] = diff[j * diff_stride + i];

      if (visible_w < bsw) {
        for (j = 0; j < bsh; j++)
          for (i = visible_w; i < bsw; i++) diff16[j * bsw + i] = 0;
      }

      if (visible_h < bsh) {
        for (j = visible_h; j < bsh; j++)
          for (i = 0; i < bsw; i++) diff16[j * bsw + i] = 0;
      }
    }
  }

  if (x->tune_metric == AOM_TUNE_DAALA_DIST) {
    d = (int64_t)od_compute_dist_diff(orig, diff16, bsw, bsh, qindex);
  } else if (x->tune_metric == AOM_TUNE_CDEF_DIST) {
    int coeff_shift = AOMMAX(xd->bd - 8, 0);
    DECLARE_ALIGNED(16, uint16_t, dst16[MAX_TX_SQUARE]);

    for (i = 0; i < bsh; i++) {
      for (j = 0; j < bsw; j++) {
        dst16[i * bsw + j] = orig[i * bsw + j] - diff16[i * bsw + j];
      }
    }

    for (i = 0; i < bsh; i += 8) {
      for (j = 0; j < bsw; j += 8) {
        d += cdef_dist_8x8_16bit(&dst16[i * bsw + j], bsw, &orig[i * bsw + j],
                                 bsw, coeff_shift);
      }
    }
    // Don't scale 'd' for HBD since it will be done by caller side for diff
    // input
  } else {
    // Otherwise, MSE by default
    d = aom_sum_squares_2d_i16(diff, diff_stride, visible_w, visible_h);
  }

  return d;
}
#endif  // CONFIG_DIST_8X8

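// Measure the prediction-error energy of the block on a 4x4 grid of
// sub-blocks and collapse it into normalized horizontal (hordist) and
// vertical (verdist) profiles used by the transform-type pruning below.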
static void get_energy_distribution_fine(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                                         const uint8_t *src, int src_stride,
                                         const uint8_t *dst, int dst_stride,
                                         double *hordist, double *verdist) {
  const int bw = block_size_wide[bsize];
  const int bh = block_size_high[bsize];
  unsigned int esq[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

  const int f_index = bsize - BLOCK_16X16;
  if (f_index < 0) {
    const int w_shift = bw == 8 ? 1 : 2;
    const int h_shift = bh == 8 ? 1 : 2;
#if CONFIG_HIGHBITDEPTH
    if (cpi->common.use_highbitdepth) {
      const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
      const uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
      for (int i = 0; i < bh; ++i)
        for (int j = 0; j < bw; ++j) {
          const int index = (j >> w_shift) + ((i >> h_shift) << 2);
          esq[index] +=
              (src16[j + i * src_stride] - dst16[j + i * dst_stride]) *
              (src16[j + i * src_stride] - dst16[j + i * dst_stride]);
        }
    } else {
#endif  // CONFIG_HIGHBITDEPTH

      for (int i = 0; i < bh; ++i)
        for (int j = 0; j < bw; ++j) {
          const int index = (j >> w_shift) + ((i >> h_shift) << 2);
          esq[index] += (src[j + i * src_stride] - dst[j + i * dst_stride]) *
                        (src[j + i * src_stride] - dst[j + i * dst_stride]);
        }
#if CONFIG_HIGHBITDEPTH
    }
#endif  // CONFIG_HIGHBITDEPTH
  } else {
    cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[0]);
    cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
                            &esq[1]);
    cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
                            &esq[2]);
    cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                            dst_stride, &esq[3]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[4]);
    cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
                            &esq[5]);
    cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
                            &esq[6]);
    cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                            dst_stride, &esq[7]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[8]);
    cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
                            &esq[9]);
    cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
                            &esq[10]);
    cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                            dst_stride, &esq[11]);
    src += bh / 4 * src_stride;
    dst += bh / 4 * dst_stride;

    cpi->fn_ptr[f_index].vf(src, src_stride, dst, dst_stride, &esq[12]);
    cpi->fn_ptr[f_index].vf(src + bw / 4, src_stride, dst + bw / 4, dst_stride,
                            &esq[13]);
    cpi->fn_ptr[f_index].vf(src + bw / 2, src_stride, dst + bw / 2, dst_stride,
                            &esq[14]);
    cpi->fn_ptr[f_index].vf(src + 3 * bw / 4, src_stride, dst + 3 * bw / 4,
                            dst_stride, &esq[15]);
  }

  double total = (double)esq[0] + esq[1] + esq[2] + esq[3] + esq[4] + esq[5] +
                 esq[6] + esq[7] + esq[8] + esq[9] + esq[10] + esq[11] +
                 esq[12] + esq[13] + esq[14] + esq[15];
  if (total > 0) {
    const double e_recip = 1.0 / total;
    hordist[0] = ((double)esq[0] + esq[4] + esq[8] + esq[12]) * e_recip;
    hordist[1] = ((double)esq[1] + esq[5] + esq[9] + esq[13]) * e_recip;
    hordist[2] = ((double)esq[2] + esq[6] + esq[10] + esq[14]) * e_recip;
    verdist[0] = ((double)esq[0] + esq[1] + esq[2] + esq[3]) * e_recip;
    verdist[1] = ((double)esq[4] + esq[5] + esq[6] + esq[7]) * e_recip;
    verdist[2] = ((double)esq[8] + esq[9] + esq[10] + esq[11]) * e_recip;
  } else {
    hordist[0] = verdist[0] = 0.25;
    hordist[1] = verdist[1] = 0.25;
    hordist[2] = verdist[2] = 0.25;
  }
}

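// Apply a small linear classifier (ADST_FLIP_SVM) to the error-energy
// profiles to decide, per direction, whether ADST or FLIPADST can be pruned
// from the transform search. Returns a bitmask of 1D transforms to skip
// (vertical in the low byte, horizontal shifted by 8).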
static int adst_vs_flipadst(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                            const uint8_t *src, int src_stride,
                            const uint8_t *dst, int dst_stride) {
  int prune_bitmask = 0;
  double svm_proj_h = 0, svm_proj_v = 0;
  double hdist[3] = { 0, 0, 0 }, vdist[3] = { 0, 0, 0 };
  get_energy_distribution_fine(cpi, bsize, src, src_stride, dst, dst_stride,
                               hdist, vdist);

  svm_proj_v = vdist[0] * ADST_FLIP_SVM[0] + vdist[1] * ADST_FLIP_SVM[1] +
               vdist[2] * ADST_FLIP_SVM[2] + ADST_FLIP_SVM[3];
  svm_proj_h = hdist[0] * ADST_FLIP_SVM[4] + hdist[1] * ADST_FLIP_SVM[5] +
               hdist[2] * ADST_FLIP_SVM[6] + ADST_FLIP_SVM[7];
  if (svm_proj_v > FAST_EXT_TX_EDST_MID + FAST_EXT_TX_EDST_MARGIN)
    prune_bitmask |= 1 << FLIPADST_1D;
  else if (svm_proj_v < FAST_EXT_TX_EDST_MID - FAST_EXT_TX_EDST_MARGIN)
    prune_bitmask |= 1 << ADST_1D;

  if (svm_proj_h > FAST_EXT_TX_EDST_MID + FAST_EXT_TX_EDST_MARGIN)
    prune_bitmask |= 1 << (FLIPADST_1D + 8);
  else if (svm_proj_h < FAST_EXT_TX_EDST_MID - FAST_EXT_TX_EDST_MARGIN)
    prune_bitmask |= 1 << (ADST_1D + 8);

  return prune_bitmask;
}

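// First-order correlation of the residual with its left neighbor (hcorr) and
// top neighbor (vcorr), computed over the block excluding the first row and
// column.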
static void get_horver_correlation(const int16_t *diff, int stride, int w,
                                   int h, double *hcorr, double *vcorr) {
  // Returns hor/ver correlation coefficient
  const int num = (h - 1) * (w - 1);
  double num_r;
  int i, j;
  int64_t xy_sum = 0, xz_sum = 0;
  int64_t x_sum = 0, y_sum = 0, z_sum = 0;
  int64_t x2_sum = 0, y2_sum = 0, z2_sum = 0;
  double x_var_n, y_var_n, z_var_n, xy_var_n, xz_var_n;
  *hcorr = *vcorr = 1;

  assert(num > 0);
  num_r = 1.0 / num;
  for (i = 1; i < h; ++i) {
    for (j = 1; j < w; ++j) {
      const int16_t x = diff[i * stride + j];
      const int16_t y = diff[i * stride + j - 1];
      const int16_t z = diff[(i - 1) * stride + j];
      xy_sum += x * y;
      xz_sum += x * z;
      x_sum += x;
      y_sum += y;
      z_sum += z;
      x2_sum += x * x;
      y2_sum += y * y;
      z2_sum += z * z;
    }
  }
  x_var_n = x2_sum - (x_sum * x_sum) * num_r;
  y_var_n = y2_sum - (y_sum * y_sum) * num_r;
  z_var_n = z2_sum - (z_sum * z_sum) * num_r;
  xy_var_n = xy_sum - (x_sum * y_sum) * num_r;
  xz_var_n = xz_sum - (x_sum * z_sum) * num_r;
  if (x_var_n > 0 && y_var_n > 0) {
    *hcorr = xy_var_n / sqrt(x_var_n * y_var_n);
    *hcorr = *hcorr < 0 ? 0 : *hcorr;
  }
  if (x_var_n > 0 && z_var_n > 0) {
    *vcorr = xz_var_n / sqrt(x_var_n * z_var_n);
    *vcorr = *vcorr < 0 ? 0 : *vcorr;
  }
}

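// Prune DCT or IDTX per direction based on how strongly the residual is
// correlated with its neighbors: high correlation keeps DCT and prunes IDTX,
// low correlation does the opposite. Returns a bitmask in the same layout as
// adst_vs_flipadst().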
int dct_vs_idtx(const int16_t *diff, int stride, int w, int h) {
  double hcorr, vcorr;
  int prune_bitmask = 0;
  get_horver_correlation(diff, stride, w, h, &hcorr, &vcorr);

  if (vcorr > FAST_EXT_TX_CORR_MID + FAST_EXT_TX_CORR_MARGIN)
    prune_bitmask |= 1 << IDTX_1D;
  else if (vcorr < FAST_EXT_TX_CORR_MID - FAST_EXT_TX_CORR_MARGIN)
    prune_bitmask |= 1 << DCT_1D;

  if (hcorr > FAST_EXT_TX_CORR_MID + FAST_EXT_TX_CORR_MARGIN)
    prune_bitmask |= 1 << (IDTX_1D + 8);
  else if (hcorr < FAST_EXT_TX_CORR_MID - FAST_EXT_TX_CORR_MARGIN)
    prune_bitmask |= 1 << (DCT_1D + 8);
  return prune_bitmask;
}

// Performance drop: 0.5%, Speed improvement: 24%
static int prune_two_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                             MACROBLOCK *x, const MACROBLOCKD *xd,
                             int adst_flipadst, int dct_idtx) {
  int prune = 0;

  if (adst_flipadst) {
    const struct macroblock_plane *const p = &x->plane[0];
    const struct macroblockd_plane *const pd = &xd->plane[0];
    prune |= adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride,
                              pd->dst.buf, pd->dst.stride);
  }
  if (dct_idtx) {
    av1_subtract_plane(x, bsize, 0);
    const struct macroblock_plane *const p = &x->plane[0];
    const int bw = 4 << (b_width_log2_lookup[bsize]);
    const int bh = 4 << (b_height_log2_lookup[bsize]);
    prune |= dct_vs_idtx(p->src_diff, bw, bw, bh);
  }

  return prune;
}

// Performance drop: 0.3%, Speed improvement: 5%
static int prune_one_for_sby(const AV1_COMP *cpi, BLOCK_SIZE bsize,
                             const MACROBLOCK *x, const MACROBLOCKD *xd) {
  const struct macroblock_plane *const p = &x->plane[0];
  const struct macroblockd_plane *const pd = &xd->plane[0];
  return adst_vs_flipadst(cpi, bsize, p->src.buf, p->src.stride, pd->dst.buf,
                          pd->dst.stride);
}

// 1D transforms used in the inter set; this needs to be changed if
// ext_tx_used_inter is changed.
static const int ext_tx_used_inter_1D[EXT_TX_SETS_INTER][TX_TYPES_1D] = {
  { 1, 0, 0, 0 }, { 1, 1, 1, 1 }, { 1, 1, 1, 1 }, { 1, 0, 0, 1 },
#if CONFIG_MRC_TX
  { 1, 0, 0, 1 },
#endif  // CONFIG_MRC_TX
};

static void get_energy_distribution_finer(const int16_t *diff, int stride,
                                          int bw, int bh, float *hordist,
                                          float *verdist) {
  // First compute downscaled block energy values (esq); downscale factors
  // are defined by w_shift and h_shift.
  unsigned int esq[256];
  const int w_shift = bw <= 8 ? 0 : 1;
  const int h_shift = bh <= 8 ? 0 : 1;
  const int esq_w = bw <= 8 ? bw : bw / 2;
  const int esq_h = bh <= 8 ? bh : bh / 2;
  const int esq_sz = esq_w * esq_h;
  int i, j;
  memset(esq, 0, esq_sz * sizeof(esq[0]));
  for (i = 0; i < bh; i++) {
    unsigned int *cur_esq_row = esq + (i >> h_shift) * esq_w;
    const int16_t *cur_diff_row = diff + i * stride;
    for (j = 0; j < bw; j++) {
      cur_esq_row[j >> w_shift] += cur_diff_row[j] * cur_diff_row[j];
    }
  }

  uint64_t total = 0;
  for (i = 0; i < esq_sz; i++) total += esq[i];

  // Output hordist and verdist arrays are normalized 1D projections of esq
  if (total == 0) {
    float hor_val = 1.0f / esq_w;
    for (j = 0; j < esq_w - 1; j++) hordist[j] = hor_val;
    float ver_val = 1.0f / esq_h;
    for (i = 0; i < esq_h - 1; i++) verdist[i] = ver_val;
    return;
  }

  const float e_recip = 1.0f / (float)total;
  memset(hordist, 0, (esq_w - 1) * sizeof(hordist[0]));
  memset(verdist, 0, (esq_h - 1) * sizeof(verdist[0]));
  const unsigned int *cur_esq_row;
  for (i = 0; i < esq_h - 1; i++) {
    cur_esq_row = esq + i * esq_w;
    for (j = 0; j < esq_w - 1; j++) {
      hordist[j] += (float)cur_esq_row[j];
      verdist[i] += (float)cur_esq_row[j];
    }
    verdist[i] += (float)cur_esq_row[j];
  }
  cur_esq_row = esq + i * esq_w;
  for (j = 0; j < esq_w - 1; j++) hordist[j] += (float)cur_esq_row[j];

  for (j = 0; j < esq_w - 1; j++) hordist[j] *= e_recip;
  for (i = 0; i < esq_h - 1; i++) verdist[i] *= e_recip;
}

// Instead of 1D projections of the block energy distribution computed by
// get_energy_distribution_finer() this function computes a full
// two-dimensional energy distribution of the input block.
static void get_2D_energy_distribution(const int16_t *diff, int stride, int bw,
                                       int bh, float *edist) {
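  // The block is tiled into 4x4 cells; only esq_sz - 1 normalized cell
  // energies are written to edist, the last one being implied by
  // normalization. This matches the feature layout consumed by
  // prune_tx_split().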
  unsigned int esq[256] = { 0 };
  const int esq_w = bw >> 2;
  const int esq_h = bh >> 2;
  const int esq_sz = esq_w * esq_h;
  uint64_t total = 0;
  for (int i = 0; i < bh; i += 4) {
    for (int j = 0; j < bw; j += 4) {
      unsigned int cur_sum_energy = 0;
      for (int k = 0; k < 4; k++) {
        const int16_t *cur_diff = diff + (i + k) * stride + j;
        cur_sum_energy += cur_diff[0] * cur_diff[0] +
                          cur_diff[1] * cur_diff[1] +
                          cur_diff[2] * cur_diff[2] + cur_diff[3] * cur_diff[3];
      }
      esq[(i >> 2) * esq_w + (j >> 2)] = cur_sum_energy;
      total += cur_sum_energy;
    }
  }

  // Guard against an all-zero residual block; otherwise the normalization
  // below would divide by zero.
  if (total == 0) {
    memset(edist, 0, (esq_sz - 1) * sizeof(edist[0]));
    return;
  }

  const float e_recip = 1.0f / (float)total;
  for (int i = 0; i < esq_sz - 1; i++) edist[i] = esq[i] * e_recip;
}

// Similar to get_horver_correlation, but also takes into account first
// row/column, when computing horizontal/vertical correlation.
static void get_horver_correlation_full(const int16_t *diff, int stride, int w,
                                        int h, float *hcorr, float *vcorr) {
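  // hcorr and vcorr are sample Pearson correlation coefficients between each
  // pixel and its left / top neighbour respectively, accumulated over the
  // whole block:
  //   corr = (sum(x*y) - sum(x)*sum(y)/n) /
  //          sqrt((sum(x^2) - sum(x)^2/n) * (sum(y^2) - sum(y)^2/n))
  // with n = h*(w-1) horizontal pairs and (h-1)*w vertical pairs. Negative
  // correlations are clamped to 0, and 1 is returned when either variance
  // is zero.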
  const float num_hor = (float)(h * (w - 1));
  const float num_ver = (float)((h - 1) * w);
  int i, j;

  // The following notation is used:
  // x - current pixel
  // y - left neighbor pixel
  // z - top neighbor pixel
  int64_t xy_sum = 0, xz_sum = 0;
  int64_t xhor_sum = 0, xver_sum = 0, y_sum = 0, z_sum = 0;
  int64_t x2hor_sum = 0, x2ver_sum = 0, y2_sum = 0, z2_sum = 0;

  int16_t x, y, z;
  for (j = 1; j < w; ++j) {
    x = diff[j];
    y = diff[j - 1];
    xy_sum += x * y;
    xhor_sum += x;
    y_sum += y;
    x2hor_sum += x * x;
    y2_sum += y * y;
  }
  for (i = 1; i < h; ++i) {
    x = diff[i * stride];
    z = diff[(i - 1) * stride];
    xz_sum += x * z;
    xver_sum += x;
    z_sum += z;
    x2ver_sum += x * x;
    z2_sum += z * z;
    for (j = 1; j < w; ++j) {
      x = diff[i * stride + j];
      y = diff[i * stride + j - 1];
      z = diff[(i - 1) * stride + j];
      xy_sum += x * y;
      xz_sum += x * z;
      xhor_sum += x;
      xver_sum += x;
      y_sum += y;
      z_sum += z;
      x2hor_sum += x * x;
      x2ver_sum += x * x;
      y2_sum += y * y;
      z2_sum += z * z;
    }
  }
  const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
  const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
  const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
  const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;
  const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;
  const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;

  *hcorr = *vcorr = 1;
  if (xhor_var_n > 0 && y_var_n > 0) {
    *hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
    *hcorr = *hcorr < 0 ? 0 : *hcorr;
  }
  if (xver_var_n > 0 && z_var_n > 0) {
    *vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
    *vcorr = *vcorr < 0 ? 0 : *vcorr;
  }
}

// Performs a forward pass through a neural network with 2 fully-connected
// layers, assuming ReLU as activation function. Number of output neurons
// is always equal to 4.
// fc1, fc2 - weight matrices of the respective layers.
// b1, b2 - bias vectors of the respective layers.
static void compute_1D_scores(float *features, int num_features,
                              const float *fc1, const float *b1,
                              const float *fc2, const float *b2,
                              int num_hidden_units, float *dst_scores) {
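  // fc1 is laid out row-major as [num_hidden_units x num_features] and fc2 as
  // [4 x num_hidden_units]; callers derive the b1/b2 pointers from weights
  // packed in the order [fc1 | b1 | fc2 | b2].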
  assert(num_hidden_units <= 32);
  float hidden_layer[32];
  for (int i = 0; i < num_hidden_units; i++) {
    const float *cur_coef = fc1 + i * num_features;
    hidden_layer[i] = 0.0f;
    for (int j = 0; j < num_features; j++)
      hidden_layer[i] += cur_coef[j] * features[j];
    hidden_layer[i] = AOMMAX(hidden_layer[i] + b1[i], 0.0f);
  }
  for (int i = 0; i < 4; i++) {
    const float *cur_coef = fc2 + i * num_hidden_units;
    dst_scores[i] = 0.0f;
    for (int j = 0; j < num_hidden_units; j++)
      dst_scores[i] += cur_coef[j] * hidden_layer[j];
    dst_scores[i] += b2[i];
  }
}

// Transforms raw scores into a probability distribution across 16 TX types
static void score_2D_transform_pow8(float *scores_2D, float shift) {
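  // Raising the shifted scores to the 8th power before normalizing strongly
  // favours TX types whose raw score is close to the maximum; all others end
  // up with negligible probability mass.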
  float sum = 0.0f;
  int i;

  for (i = 0; i < 16; i++) {
    float v, v2, v4;
    v = AOMMAX(scores_2D[i] + shift, 0.0f);
    v2 = v * v;
    v4 = v2 * v2;
    scores_2D[i] = v4 * v4;
    sum += scores_2D[i];
  }
  for (i = 0; i < 16; i++) scores_2D[i] /= sum;
}

// Similarly to compute_1D_scores() performs a forward pass through a
// neural network with two fully-connected layers. The only difference
// is that it assumes 1 output neuron, as required by the classifier used
// for TX size pruning.
static float compute_tx_split_prune_score(float *features, int num_features,
                                          const float *fc1, const float *b1,
                                          const float *fc2, float b2,
                                          int num_hidden_units) {
  assert(num_hidden_units <= 64);
  float hidden_layer[64];
  for (int i = 0; i < num_hidden_units; i++) {
    const float *cur_coef = fc1 + i * num_features;
    hidden_layer[i] = 0.0f;
    for (int j = 0; j < num_features; j++)
      hidden_layer[i] += cur_coef[j] * features[j];
    hidden_layer[i] = AOMMAX(hidden_layer[i] + b1[i], 0.0f);
  }
  float dst_score = 0.0f;
  for (int j = 0; j < num_hidden_units; j++)
    dst_score += fc2[j] * hidden_layer[j];
  dst_score += b2;
  return dst_score;
}

static int prune_tx_split(BLOCK_SIZE bsize, const int16_t *diff, float hcorr,
                          float vcorr) {
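  // Returns 1 if the TX-size search should be skipped and the largest
  // available TX size used directly, 0 if the full search should be done.
  // Only applied to block sizes between 8x8 and 16x16.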
  if (bsize <= BLOCK_4X4 || bsize > BLOCK_16X16) return 0;

  float features[17];
  const int bw = block_size_wide[bsize], bh = block_size_high[bsize];
  const int feature_num = (bw / 4) * (bh / 4) + 1;
  assert(feature_num <= 17);

  get_2D_energy_distribution(diff, bw, bw, bh, features);
  features[feature_num - 2] = hcorr;
  features[feature_num - 1] = vcorr;

  const int bidx = bsize - BLOCK_4X4 - 1;
  const float *fc1 = av1_prune_tx_split_learned_weights[bidx];
  const float *b1 =
      fc1 + av1_prune_tx_split_num_hidden_units[bidx] * feature_num;
  const float *fc2 = b1 + av1_prune_tx_split_num_hidden_units[bidx];
  float b2 = *(fc2 + av1_prune_tx_split_num_hidden_units[bidx]);
  float score =
      compute_tx_split_prune_score(features, feature_num, fc1, b1, fc2, b2,
                                   av1_prune_tx_split_num_hidden_units[bidx]);

  return (score > av1_prune_tx_split_thresholds[bidx]);
}

static int prune_tx_2D(BLOCK_SIZE bsize, const MACROBLOCK *x, int tx_set_type,
                       int tx_type_pruning_aggressiveness,
                       int use_tx_split_prune) {
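  // Returns a bitmask with one bit per 2-D TX type: a set bit means that TX
  // type is pruned from the RD search. Bit TX_TYPES additionally carries the
  // TX-split pruning decision when use_tx_split_prune is set (see
  // prune_tx_split()).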
  if (bsize >= BLOCK_32X32) return 0;
  const struct macroblock_plane *const p = &x->plane[0];
  const int bidx = AOMMAX(bsize - BLOCK_4X4, 0);
  const float score_thresh =
      av1_prune_2D_adaptive_thresholds[bidx]
                                      [tx_type_pruning_aggressiveness - 1];

  float hfeatures[16], vfeatures[16];
  float hscores[4], vscores[4];
  float scores_2D[16];
  static const int tx_type_table_2D[16] = {
    DCT_DCT,      DCT_ADST,      DCT_FLIPADST,      V_DCT,
    ADST_DCT,     ADST_ADST,     ADST_FLIPADST,     V_ADST,
    FLIPADST_DCT, FLIPADST_ADST, FLIPADST_FLIPADST, V_FLIPADST,
    H_DCT,        H_ADST,        H_FLIPADST,        IDTX
  };
  const int bw = block_size_wide[bsize], bh = block_size_high[bsize];
  const int hfeatures_num = bw <= 8 ? bw : bw / 2;
  const int vfeatures_num = bh <= 8 ? bh : bh / 2;
  assert(hfeatures_num <= 16);
  assert(vfeatures_num <= 16);

  get_energy_distribution_finer(p->src_diff, bw, bw, bh, hfeatures, vfeatures);
  get_horver_correlation_full(p->src_diff, bw, bw, bh,
                              &hfeatures[hfeatures_num - 1],
                              &vfeatures[vfeatures_num - 1]);

  const float *fc1_hor = av1_prune_2D_learned_weights_hor[bidx];
  const float *b1_hor =
      fc1_hor + av1_prune_2D_num_hidden_units_hor[bidx] * hfeatures_num;
  const float *fc2_hor = b1_hor + av1_prune_2D_num_hidden_units_hor[bidx];
  const float *b2_hor = fc2_hor + av1_prune_2D_num_hidden_units_hor[bidx] * 4;
  compute_1D_scores(hfeatures, hfeatures_num, fc1_hor, b1_hor, fc2_hor, b2_hor,
                    av1_prune_2D_num_hidden_units_hor[bidx], hscores);

  const float *fc1_ver = av1_prune_2D_learned_weights_ver[bidx];
  const float *b1_ver =
      fc1_ver + av1_prune_2D_num_hidden_units_ver[bidx] * vfeatures_num;
  const float *fc2_ver = b1_ver + av1_prune_2D_num_hidden_units_ver[bidx];
  const float *b2_ver = fc2_ver + av1_prune_2D_num_hidden_units_ver[bidx] * 4;
  compute_1D_scores(vfeatures, vfeatures_num, fc1_ver, b1_ver, fc2_ver, b2_ver,
                    av1_prune_2D_num_hidden_units_ver[bidx], vscores);

  float score_2D_average = 0.0f;
  for (int i = 0; i < 4; i++) {
    float *cur_scores_2D = scores_2D + i * 4;
    cur_scores_2D[0] = vscores[i] * hscores[0];
    cur_scores_2D[1] = vscores[i] * hscores[1];
    cur_scores_2D[2] = vscores[i] * hscores[2];
    cur_scores_2D[3] = vscores[i] * hscores[3];
    score_2D_average += cur_scores_2D[0] + cur_scores_2D[1] + cur_scores_2D[2] +
                        cur_scores_2D[3];
  }
  score_2D_average /= 16;
  score_2D_transform_pow8(scores_2D, (20 - score_2D_average));

  // Always keep the TX type with the highest score, prune all others with
  // score below score_thresh.
  int max_score_i = 0;
  float max_score = 0.0f;
  for (int i = 0; i < 16; i++) {
    if (scores_2D[i] > max_score &&
        av1_ext_tx_used[tx_set_type][tx_type_table_2D[i]]) {
      max_score = scores_2D[i];
      max_score_i = i;
    }
  }

  int prune_bitmask = 0;
  for (int i = 0; i < 16; i++) {
    if (scores_2D[i] < score_thresh && i != max_score_i)
      prune_bitmask |= (1 << tx_type_table_2D[i]);
  }

  // Also apply TX size pruning if it's turned on. The value
  // of prune_tx_split_flag indicates whether we should do
  // full TX size search (flag=0) or use the largest available
  // TX size without performing any further search (flag=1).
  int prune_tx_split_flag = 0;
  if (use_tx_split_prune) {
    prune_tx_split_flag =
        prune_tx_split(bsize, p->src_diff, hfeatures[hfeatures_num - 1],
                       vfeatures[vfeatures_num - 1]);
  }
  prune_bitmask |= (prune_tx_split_flag << TX_TYPES);
  return prune_bitmask;
}

static int prune_tx(const AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                    const MACROBLOCKD *const xd, int tx_set_type,
                    int use_tx_split_prune) {
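  // Dispatches on the tx_type_search.prune_mode speed feature and returns the
  // pruning bitmask that do_tx_type_search() interprets below.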
  int tx_set = ext_tx_set_index[1][tx_set_type];
  assert(tx_set >= 0);
  const int *tx_set_1D = ext_tx_used_inter_1D[tx_set];

  switch (cpi->sf.tx_type_search.prune_mode) {
    case NO_PRUNE: return 0; break;
    case PRUNE_ONE:
      if (!(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D])) return 0;
      return prune_one_for_sby(cpi, bsize, x, xd);
      break;
    case PRUNE_TWO:
      if (!(tx_set_1D[FLIPADST_1D] & tx_set_1D[ADST_1D])) {
        if (!(tx_set_1D[DCT_1D] & tx_set_1D[IDTX_1D])) return 0;
        return prune_two_for_sby(cpi, bsize, x, xd, 0, 1);
      }
      if (!(tx_set_1D[DCT_1D] & tx_set_1D[IDTX_1D]))
        return prune_two_for_sby(cpi, bsize, x, xd, 1, 0);
      return prune_two_for_sby(cpi, bsize, x, xd, 1, 1);
      break;
    case PRUNE_2D_ACCURATE:
      if (tx_set_type == EXT_TX_SET_ALL16)
        return prune_tx_2D(bsize, x, tx_set_type, 6, use_tx_split_prune);
      else if (tx_set_type == EXT_TX_SET_DTT9_IDTX_1DDCT)
        return prune_tx_2D(bsize, x, tx_set_type, 4, use_tx_split_prune);
      else
        return 0;
      break;
    case PRUNE_2D_FAST:
      if (tx_set_type == EXT_TX_SET_ALL16)
        return prune_tx_2D(bsize, x, tx_set_type, 10, use_tx_split_prune);
      else if (tx_set_type == EXT_TX_SET_DTT9_IDTX_1DDCT)
        return prune_tx_2D(bsize, x, tx_set_type, 7, use_tx_split_prune);
      else
        return 0;
      break;
  }
  assert(0);
  return 0;
}

static int do_tx_type_search(TX_TYPE tx_type, int prune,
                             TX_TYPE_PRUNE_MODE mode) {
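  // Returns 1 if tx_type should be evaluated, 0 if it has been pruned.
  // For the PRUNE_2D_* modes the prune mask has one bit per 2-D TX type;
  // for the older modes the low 8 bits flag pruned vertical 1-D transforms
  // and the next 8 bits flag pruned horizontal ones.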
  // TODO(sarahparker) implement for non ext tx
  if (mode >= PRUNE_2D_ACCURATE) {
    return !((prune >> tx_type) & 1);
  } else {
    return !(((prune >> vtx_tab[tx_type]) & 1) |
             ((prune >> (htx_tab[tx_type] + 8)) & 1));
  }
}

static void model_rd_from_sse(const AV1_COMP *const cpi,
                              const MACROBLOCKD *const xd, BLOCK_SIZE bsize,
                              int plane, int64_t sse, int *rate,
                              int64_t *dist) {
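  // Fast path (sf.simple_model_rd_from_var):
  //   rate ~ (sse * (280 - q)) >> (16 - AV1_PROB_COST_SHIFT)  for q < 120,
  //          0 otherwise
  //   dist ~ (sse * q) >> 8
  // where q is the dequant step scaled down by dequant_shift. Otherwise a
  // Laplacian source model is used via av1_model_rd_from_var_lapndz().
  // The distortion is scaled up by 16 before returning (dist <<= 4).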
  const struct macroblockd_plane *const pd = &xd->plane[plane];
  const int dequant_shift =
#if CONFIG_HIGHBITDEPTH
      (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
#endif  // CONFIG_HIGHBITDEPTH
                                                    3;

  // Fast approximate the modelling function.
  if (cpi->sf.simple_model_rd_from_var) {
    const int64_t square_error = sse;
    int quantizer = (pd->dequant[1] >> dequant_shift);

    if (quantizer < 120)
      *rate = (int)((square_error * (280 - quantizer)) >>
                    (16 - AV1_PROB_COST_SHIFT));
    else
      *rate = 0;
    *dist = (square_error * quantizer) >> 8;
  } else {
    av1_model_rd_from_var_lapndz(sse, num_pels_log2_lookup[bsize],
                                 pd->dequant[1] >> dequant_shift, rate, dist);
  }

  *dist <<= 4;
}

static void model_rd_for_sb(const AV1_COMP *const cpi, BLOCK_SIZE bsize,
                            MACROBLOCK *x, MACROBLOCKD *xd, int plane_from,
                            int plane_to, int *out_rate_sum,
                            int64_t *out_dist_sum, int *skip_txfm_sb,
                            int64_t *skip_sse_sb) {