Commit 2fa189e5 authored by Sebastien Alaiwan, committed by Debargha Mukherjee

Remove LGT experiment

This experiment has been abandoned for AV1.

Change-Id: If560a67d00b8ae3daa377a59293d5125a8cb7902
parent 5d0320f7
@@ -29,9 +29,9 @@ typedef struct txfm_param {
 int lossless;
 int bd;
 TxSetType tx_set_type;
-#if CONFIG_MRC_TX || CONFIG_LGT
+#if CONFIG_MRC_TX
 int is_inter;
-#endif // CONFIG_MRC_TX || CONFIG_LGT
+#endif // CONFIG_MRC_TX
 #if CONFIG_MRC_TX
 int stride;
 uint8_t *dst;
@@ -99,53 +99,4 @@ static INLINE tran_high_t fdct_round_shift(tran_high_t input) {
 return rv;
 }
-#if CONFIG_LGT
-// LGT4 name: lgt4_170
-// Self loops: 1.700, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000
-static const tran_high_t lgt4_170[4][4] = {
-{ 3636, 9287, 13584, 15902 },
-{ 10255, 15563, 2470, -13543 },
-{ 14786, 711, -15249, 9231 },
-{ 14138, -14420, 10663, -3920 },
-};
-// LGT4 name: lgt4_140
-// Self loops: 1.400, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000
-static const tran_high_t lgt4_140[4][4] = {
-{ 4206, 9518, 13524, 15674 },
-{ 11552, 14833, 1560, -13453 },
-{ 15391, -1906, -14393, 9445 },
-{ 12201, -14921, 12016, -4581 },
-};
-// LGT8 name: lgt8_170
-// Self loops: 1.700, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
-static const tran_high_t lgt8_170[8][8] = {
-{ 1858, 4947, 7850, 10458, 12672, 14411, 15607, 16217 },
-{ 5494, 13022, 16256, 14129, 7343, -1864, -10456, -15601 },
-{ 8887, 16266, 9500, -5529, -15749, -12273, 1876, 14394 },
-{ 11870, 13351, -6199, -15984, -590, 15733, 7273, -12644 },
-{ 14248, 5137, -15991, 291, 15893, -5685, -13963, 10425 },
-{ 15716, -5450, -10010, 15929, -6665, -8952, 16036, -7835 },
-{ 15533, -13869, 6559, 3421, -12009, 15707, -13011, 5018 },
-{ 11357, -13726, 14841, -14600, 13025, -10259, 6556, -2254 },
-};
-// LGT8 name: lgt8_150
-// Self loops: 1.500, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000
-// Edges: 1.000, 1.000, 1.000, 1.000, 1.000, 1.000, 1.000
-static const tran_high_t lgt8_150[8][8] = {
-{ 2075, 5110, 7958, 10511, 12677, 14376, 15544, 16140 },
-{ 6114, 13307, 16196, 13845, 7015, -2084, -10509, -15534 },
-{ 9816, 16163, 8717, -6168, -15790, -11936, 2104, 14348 },
-{ 12928, 12326, -7340, -15653, 242, 15763, 6905, -12632 },
-{ 15124, 3038, -16033, 1758, 15507, -6397, -13593, 10463 },
-{ 15895, -7947, -7947, 15895, -7947, -7947, 15895, -7947 },
-{ 14325, -15057, 9030, 1050, -10659, 15483, -13358, 5236 },
-{ 9054, -12580, 14714, -15220, 14043, -11312, 7330, -2537 },
-};
-#endif // CONFIG_LGT
 #endif // AOM_DSP_TXFM_COMMON_H_
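
Note: the tables removed above are the fixed-point LGT bases themselves, each derived from a 4- or 8-node line graph with the self-loop and edge weights listed in the comments. Each row is one basis vector stored at a scale compatible with the 14-bit fdct_round_shift(): rows of the 4-point tables have squared norm of about 2^29 (a sqrt(2) gain per stage) and rows of the 8-point tables about 2^30 (a gain of 2), which matches the "rowTX+.5" / "colTX+1" bookkeeping in the scaling comments further down. A standalone sketch (not part of the tree; the matrix is pasted from the removed block) that checks the orthogonality and scale of lgt4_170:

#include <stdint.h>
#include <stdio.h>

/* lgt4_170, copied verbatim from the block removed above. */
static const int64_t lgt4_170[4][4] = {
  { 3636, 9287, 13584, 15902 },
  { 10255, 15563, 2470, -13543 },
  { 14786, 711, -15249, 9231 },
  { 14138, -14420, 10663, -3920 },
};

int main(void) {
  /* Print M * M^T scaled by 2^29; the result should be close to the
     identity matrix if the rows are orthogonal with norm sqrt(2)*2^14. */
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      int64_t dot = 0;
      for (int k = 0; k < 4; ++k) dot += lgt4_170[i][k] * lgt4_170[j][k];
      printf("%9.5f ", (double)dot / (1LL << 29));
    }
    printf("\n");
  }
  return 0;
}

The off-diagonal terms come out around 1e-5 of the diagonal, i.e. the basis is orthogonal to within integer rounding.
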
@@ -70,7 +70,7 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
 # Inverse dct
 #
 add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
-if (aom_config("CONFIG_DAALA_TX4") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
 if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
 specialize qw/av1_iht4x4_16_add sse2/;
 } else {
@@ -105,7 +105,7 @@ add_proto qw/void av1_iht8x32_256_add/, "const tran_low_t *input, uint8_t *dest,
 add_proto qw/void av1_iht32x8_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
 add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
-if (aom_config("CONFIG_DAALA_TX8") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
 if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
 specialize qw/av1_iht8x8_64_add sse2/;
 } else {
@@ -115,7 +115,7 @@ if (aom_config("CONFIG_DAALA_TX8") ne "yes" && aom_config("CONFIG_LGT") ne "yes"
 add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
-if (aom_config("CONFIG_DAALA_TX16") ne "yes" && aom_config("CONFIG_LGT") ne "yes") {
+if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
 if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
 specialize qw/av1_iht16x16_256_add sse2 avx2/;
 } else {
......
@@ -32,13 +32,6 @@ typedef struct {
 transform_1d cols, rows; // vertical and horizontal
 } transform_2d;
-#if CONFIG_LGT
-int get_lgt4(const TxfmParam *txfm_param, int is_col,
-const tran_high_t **lgtmtx);
-int get_lgt8(const TxfmParam *txfm_param, int is_col,
-const tran_high_t **lgtmtx);
-#endif // CONFIG_LGT
 #if CONFIG_HIGHBITDEPTH
 typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
......
@@ -1070,32 +1070,6 @@ static void get_masked_residual32(const int16_t **input, int *input_stride,
 }
 #endif // CONFIG_MRC_TX
-#if CONFIG_LGT
-static void flgt4(const tran_low_t *input, tran_low_t *output,
-const tran_high_t *lgtmtx) {
-if (!lgtmtx) assert(0);
-// evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,4
-tran_high_t s[4] = { 0 };
-for (int i = 0; i < 4; ++i)
-for (int j = 0; j < 4; ++j) s[j] += lgtmtx[j * 4 + i] * input[i];
-for (int i = 0; i < 4; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
-}
-static void flgt8(const tran_low_t *input, tran_low_t *output,
-const tran_high_t *lgtmtx) {
-if (!lgtmtx) assert(0);
-// evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,8
-tran_high_t s[8] = { 0 };
-for (int i = 0; i < 8; ++i)
-for (int j = 0; j < 8; ++j) s[j] += lgtmtx[j * 8 + i] * input[i];
-for (int i = 0; i < 8; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
-}
-#endif // CONFIG_LGT
 // TODO(sarahparker) these functions will be removed once the highbitdepth
 // codepath works properly for rectangular transforms. They have almost
 // identical versions in av1_fwd_txfm1d.c, but those are currently only
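
Note: each removed 1-D stage above is a plain fixed-point matrix-vector product followed by the usual 14-bit rounding shift. A self-contained sketch of the removed 4-point stage (the typedefs, DCT_CONST_BITS, and the simplified fdct_round_shift below are inlined assumptions, not the tree's exact definitions):

#include <stdint.h>
#include <stdio.h>

typedef int32_t tran_low_t; /* assumed high-bitdepth typedefs */
typedef int64_t tran_high_t;
#define DCT_CONST_BITS 14

/* Simplified stand-in for the tree's fdct_round_shift(). */
static tran_high_t fdct_round_shift(tran_high_t input) {
  return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

static const tran_high_t lgt4_170[4][4] = {
  { 3636, 9287, 13584, 15902 },
  { 10255, 15563, 2470, -13543 },
  { 14786, 711, -15249, 9231 },
  { 14138, -14420, 10663, -3920 },
};

/* Same logic as the removed flgt4(): s[j] = sum_i lgtmtx[j][i] * input[i]. */
static void flgt4(const tran_low_t *input, tran_low_t *output,
                  const tran_high_t *lgtmtx) {
  tran_high_t s[4] = { 0 };
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j) s[j] += lgtmtx[j * 4 + i] * input[i];
  for (int i = 0; i < 4; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
}

int main(void) {
  const tran_low_t in[4] = { 64, 64, 64, 64 }; /* flat (DC) input */
  tran_low_t out[4];
  flgt4(in, out, &lgt4_170[0][0]);
  /* Prints "166 58 37 25": the 1.7 self-loop biases the first basis
     vector away from flat DC, so energy is not fully compacted into
     out[0] the way a DCT would compact it. */
  for (int i = 0; i < 4; ++i) printf("%d ", out[i]);
  printf("\n");
  return 0;
}
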
@@ -1286,15 +1260,6 @@ void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[4 * 4];
 maybe_flip_input(&input, &stride, 4, 4, flipped_input, tx_type);
-#if CONFIG_LGT
-// Choose LGT adaptive to the prediction. We may apply different LGTs for
-// different rows/columns, indicated by the pointers to 2D arrays
-const tran_high_t *lgtmtx_col[1];
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
 // Columns
 for (i = 0; i < 4; ++i) {
 /* A C99-safe upshift by 4 for both Daala and VPx TX. */
@@ -1302,24 +1267,14 @@ void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
 #if !CONFIG_DAALA_TX4
 if (i == 0 && temp_in[0]) temp_in[0] += 1;
 #endif
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt4(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+ht.cols(temp_in, temp_out);
 for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j];
 }
 // Rows
 for (i = 0; i < 4; ++i) {
 for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4];
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt4(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+ht.rows(temp_in, temp_out);
 #if CONFIG_DAALA_TX4
 /* Daala TX has orthonormal scaling; shift down by only 1 to achieve
 the usual VPx coefficient left-shift of 3. */
@@ -1386,13 +1341,6 @@ void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[8 * 4];
 maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
 // Multi-way scaling matrix (bits):
 // LGT/AV1 row,col input+2.5, rowTX+.5, mid+0, colTX+1, out-1 == 3
 // LGT row, Daala col input+3.5, rowTX+.5, mid+0, colTX+0, out-1 == 3
@@ -1404,29 +1352,16 @@ void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
 // Input scaling
 for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-// Input scaling when LGT might be active (1-4 above)
-temp_in[j] = use_lgt_row ?
-(tran_low_t)fdct_round_shift(input[i * stride + j] * Sqrt2 *
-(use_lgt_col ? 4 : 8)) :
-input[i * stride + j] * (use_lgt_col ? 8 : 16));
-#else
 // Input scaling when LGT is not possible, Daala only (4 above)
 temp_in[j] = input[i * stride + j] * 16;
-#endif
 #else
 // Input scaling when Daala is not possible, LGT/AV1 only (1 above)
 temp_in[j] =
 (tran_low_t)fdct_round_shift(input[i * stride + j] * 4 * Sqrt2);
 #endif
 }
-// Row transform (AV1/LGT scale up .5 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt4(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+// Row transform (AV1/LGT scale up .5 bit, Daala does not scale)
+ht.rows(temp_in, temp_out);
 // No mid scaling
 for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
 }
@@ -1434,13 +1369,8 @@ void av1_fht4x8_c(const int16_t *input, tran_low_t *output, int stride,
 // Columns
 for (i = 0; i < n; ++i) {
 for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
-// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt8(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
+ht.cols(temp_in, temp_out);
 // Output scaling is always a downshift of 1
 for (j = 0; j < n2; ++j)
 output[i + j * n] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
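
Note: the "Multi-way scaling matrix" comments above are a bit-budget ledger: every configuration path through av1_fht4x8_c must scale the input by exactly 3 bits before quantization, and each comment line lists how one path gets there. For the LGT/AV1 row,col path, the input stage multiplies by 4 * Sqrt2 (+2.5 bits), the 4-point row stage gains sqrt(2) (+0.5), there is no mid scaling, the 8-point column stage gains a full bit (+1), and the final (x + (x < 0)) >> 1 drops one (-1). A toy check of that arithmetic (gains transcribed from the comments, not measured):

#include <math.h>
#include <stdio.h>

int main(void) {
  /* LGT/AV1 row,col path through av1_fht4x8_c, per the ledger above. */
  const double input_bits = log2(4.0 * sqrt(2.0)); /* fdct_round_shift(x * 4 * Sqrt2) -> 2.5 */
  const double row_tx_bits = 0.5; /* 4-point stage: basis rows have norm sqrt(2) * 2^14 */
  const double mid_bits = 0.0;    /* "No mid scaling" */
  const double col_tx_bits = 1.0; /* 8-point stage: basis rows have norm 2^15 */
  const double out_bits = -1.0;   /* (temp_out[j] + (temp_out[j] < 0)) >> 1 */
  printf("total: %.1f bits\n",    /* prints "total: 3.0 bits" */
         input_bits + row_tx_bits + mid_bits + col_tx_bits + out_bits);
  return 0;
}

The other ledgers below (8x4, 8x16, 16x8) balance the same way with the row and column roles swapped.
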
@@ -1503,13 +1433,6 @@ void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[8 * 4];
 maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
 // Multi-way scaling matrix (bits):
 // LGT/AV1 row,col input+2.5, rowTX+1, mid+0, colTX+.5, out-1 == 3
 // LGT row, Daala col input+3, rowTX+1, mid+0, colTX+0, out-1 == 3
@@ -1520,29 +1443,16 @@ void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
 for (i = 0; i < n2; ++i) {
 for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8
-#if CONFIG_LGT
-// Input scaling when LGT might be active (1-4 above)
-temp_in[j] = use_lgt_col ?
-(tran_low_t)fdct_round_shift(input[j * stride + i] * Sqrt2 *
-(use_lgt_row ? 4 : 8)) :
-input[j * stride + i] * (use_lgt_row ? 8 : 16));
-#else
 // Input scaling when LGT is not possible, Daala only (4 above)
 temp_in[j] = input[j * stride + i] * 16;
-#endif
 #else
 // Input scaling when Daala is not possible, AV1/LGT only (1 above)
 temp_in[j] =
 (tran_low_t)fdct_round_shift(input[j * stride + i] * 4 * Sqrt2);
 #endif
 }
-// Column transform (AV1/LGT scale up .5 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt4(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+// Column transform (AV1/LGT scale up .5 bit, Daala does not scale)
+ht.cols(temp_in, temp_out);
 // No scaling between transforms
 for (j = 0; j < n; ++j) out[j * n2 + i] = temp_out[j];
 }
@@ -1550,13 +1460,8 @@ void av1_fht8x4_c(const int16_t *input, tran_low_t *output, int stride,
 // Rows
 for (i = 0; i < n; ++i) {
 for (j = 0; j < n2; ++j) temp_in[j] = out[j + i * n2];
-// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt8(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
+ht.rows(temp_in, temp_out);
 // Output scaling is always a downshift of 1
 for (j = 0; j < n2; ++j)
 output[j + i * n2] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
@@ -1600,20 +1505,10 @@ void av1_fht4x16_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[16 * 4];
 maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_row = get_lgt4(txfm_param, 0, lgtmtx_row);
-#endif
 // Rows
 for (i = 0; i < n4; ++i) {
 for (j = 0; j < n; ++j) temp_in[j] = input[i * stride + j] * 4;
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt4(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+ht.rows(temp_in, temp_out);
 for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
 }
@@ -1663,20 +1558,10 @@ void av1_fht16x4_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[16 * 4];
 maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-int use_lgt_col = get_lgt4(txfm_param, 1, lgtmtx_col);
-#endif
 // Columns
 for (i = 0; i < n4; ++i) {
 for (j = 0; j < n; ++j) temp_in[j] = input[j * stride + i] * 4;
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt4(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+ht.cols(temp_in, temp_out);
 for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
 }
@@ -1745,11 +1630,6 @@ void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[16 * 8];
 maybe_flip_input(&input, &stride, n2, n, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
 // Multi-way scaling matrix (bits):
 // LGT/AV1 row, AV1 col input+2.5, rowTX+1, mid-2, colTX+1.5, out+0 == 3
 // LGT row, Daala col input+3, rowTX+1, mid+0, colTX+0, out-1 == 3
@@ -1761,13 +1641,8 @@ void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
 // Input scaling
 for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-#if CONFIG_LGT
-// Input scaling when LGT might be active (cases 2, 4 above)
-temp_in[j] = input[i * stride + j] * (use_lgt_row ? 2 : 4) * 4;
-#else
 // Input scaling when LGT is not possible, Daala only (case 4 above)
 temp_in[j] = input[i * stride + j] * 16;
-#endif
 #else
 // Input scaling when Daala is not possible, LGT/AV1 only (case 1 above)
 temp_in[j] =
@@ -1775,13 +1650,8 @@ void av1_fht8x16_c(const int16_t *input, tran_low_t *output, int stride,
 #endif
 }
-// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt8(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+// Row transform (AV1/LGT scale up 1 bit, Daala does not scale)
+ht.rows(temp_in, temp_out);
 // Mid scaling
 for (j = 0; j < n; ++j) {
@@ -1868,11 +1738,6 @@ void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[16 * 8];
 maybe_flip_input(&input, &stride, n, n2, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-#endif
 // Multi-way scaling matrix (bits):
 // LGT/AV1 col, AV1 row input+2.5, colTX+1, mid-2, rowTX+1.5, out+0 == 3
 // LGT col, Daala row input+3, colTX+1, mid+0, rowTX+0, out-1 == 3
@@ -1884,13 +1749,8 @@ void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
 // Input scaling
 for (j = 0; j < n; ++j) {
 #if CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16
-#if CONFIG_LGT
-// Input scaling when LGT might be active (1, 2 above)
-temp_in[j] = input[j * stride + i] * 4 * (use_lgt_col ? 2 : 4);
-#else
 // Input scaling when LGT is not possible, Daala only (4 above)
 temp_in[j] = input[j * stride + i] * 16;
-#endif
 #else
 // Input scaling when Daala is not possible, AV1/LGT only (1 above)
 temp_in[j] =
@@ -1898,13 +1758,8 @@ void av1_fht16x8_c(const int16_t *input, tran_low_t *output, int stride,
 #endif
 }
-// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt8(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+// Column transform (AV1/LGT scale up 1 bit, Daala does not scale)
+ht.cols(temp_in, temp_out);
 // Mid scaling
 for (j = 0; j < n; ++j) {
@@ -1972,20 +1827,10 @@ void av1_fht8x32_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[32 * 8];
 maybe_flip_input(&input, &stride, n4, n, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
 // Rows
 for (i = 0; i < n4; ++i) {
 for (j = 0; j < n; ++j) temp_in[j] = input[i * stride + j] * 4;
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt8(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+ht.rows(temp_in, temp_out);
 for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
 }
@@ -2035,20 +1880,10 @@ void av1_fht32x8_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[32 * 8];
 maybe_flip_input(&input, &stride, n, n4, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-#endif
 // Columns
 for (i = 0; i < n4; ++i) {
 for (j = 0; j < n; ++j) temp_in[j] = input[j * stride + i] * 4;
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt8(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+ht.cols(temp_in, temp_out);
 for (j = 0; j < n; ++j) out[j * n4 + i] = temp_out[j];
 }
@@ -2291,13 +2126,6 @@ void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
 int16_t flipped_input[8 * 8];
 maybe_flip_input(&input, &stride, 8, 8, flipped_input, tx_type);
-#if CONFIG_LGT
-const tran_high_t *lgtmtx_col[1];
-const tran_high_t *lgtmtx_row[1];
-int use_lgt_col = get_lgt8(txfm_param, 1, lgtmtx_col);
-int use_lgt_row = get_lgt8(txfm_param, 0, lgtmtx_row);
-#endif
 // Columns
 for (i = 0; i < 8; ++i) {
 #if CONFIG_DAALA_TX8
@@ -2305,24 +2133,14 @@ void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
 #else
 for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4;
 #endif
-#if CONFIG_LGT
-if (use_lgt_col)
-flgt8(temp_in, temp_out, lgtmtx_col[0]);
-else
-#endif
-ht.cols(temp_in, temp_out);
+ht.cols(temp_in, temp_out);
 for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j];
 }
 // Rows
 for (i = 0; i < 8; ++i) {
 for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8];
-#if CONFIG_LGT
-if (use_lgt_row)
-flgt8(temp_in, temp_out, lgtmtx_row[0]);
-else
-#endif
-ht.rows(temp_in, temp_out);
+ht.rows(temp_in, temp_out);
 #if CONFIG_DAALA_TX8
 for (j = 0; j < 8; ++j)
 output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1;
......
@@ -542,7 +542,7 @@ void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
 txfm_param.tx_set_type =
 get_ext_tx_set_type(txfm_param.tx_size, plane_bsize, is_inter_block(mbmi),
 cm->reduced_tx_set_used);
-#if CONFIG_MRC_TX || CONFIG_LGT
+#if CONFIG_MRC_TX
 txfm_param.is_inter = is_inter_block(mbmi);
 #endif
 #if CONFIG_MRC_TX
......
@@ -24,7 +24,7 @@ static void fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
 return;
 }
-#if CONFIG_LGT || CONFIG_DAALA_TX4
+#if CONFIG_DAALA_TX4
 // only C version has LGTs
 av1_fht4x4_c(src_diff, coeff, diff_stride, txfm_param);
 #else
@@ -34,7 +34,7 @@ static void fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
 static void fwd_txfm_4x8(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
 av1_fht4x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
 av1_fht4x8(src_diff, coeff, diff_stride, txfm_param);
@@ -43,7 +43,7 @@ static void fwd_txfm_4x8(const int16_t *src_diff, tran_low_t *coeff,
 static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
+#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
 av1_fht8x4_c(src_diff, coeff, diff_stride, txfm_param);
 #else
 av1_fht8x4(src_diff, coeff, diff_stride, txfm_param);
@@ -52,7 +52,7 @@ static void fwd_txfm_8x4(const int16_t *src_diff, tran_low_t *coeff,
 static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
 av1_fht8x16_c(src_diff, coeff, diff_stride, txfm_param);
 #else
 av1_fht8x16(src_diff, coeff, diff_stride, txfm_param);
@@ -61,7 +61,7 @@ static void fwd_txfm_8x16(const int16_t *src_diff, tran_low_t *coeff,
 static void fwd_txfm_16x8(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
+#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
 av1_fht16x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
 av1_fht16x8(src_diff, coeff, diff_stride, txfm_param);
@@ -88,7 +88,7 @@ static void fwd_txfm_32x16(const int16_t *src_diff, tran_low_t *coeff,
 static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT || CONFIG_DAALA_TX8
+#if CONFIG_DAALA_TX8
 av1_fht8x8_c(src_diff, coeff, diff_stride, txfm_param);
 #else
 av1_fht8x8(src_diff, coeff, diff_stride, txfm_param);
@@ -145,38 +145,22 @@ static void fwd_txfm_64x32(const int16_t *src_diff, tran_low_t *coeff,
 #if CONFIG_RECT_TX_EXT
 static void fwd_txfm_16x4(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-av1_fht16x4_c(src_diff, coeff, diff_stride, txfm_param);
-#else
 av1_fht16x4(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 static void fwd_txfm_4x16(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-av1_fht4x16_c(src_diff, coeff, diff_stride, txfm_param);
-#else
 av1_fht4x16(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 static void fwd_txfm_32x8(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-av1_fht32x8_c(src_diff, coeff, diff_stride, txfm_param);
-#else
 av1_fht32x8(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 static void fwd_txfm_8x32(const int16_t *src_diff, tran_low_t *coeff,
 int diff_stride, TxfmParam *txfm_param) {
-#if CONFIG_LGT
-av1_fht8x32_c(src_diff, coeff, diff_stride, txfm_param);
-#else
 av1_fht8x32(src_diff, coeff, diff_stride, txfm_param);
-#endif
 }
 #endif
......
@@ -156,7 +156,6 @@ set(CONFIG_INTRABC 0 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_INTRA_EDGE 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_JNT_COMP 0 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_KF_CTX 1 CACHE NUMBER "AV1 experiment flag.")
-set(CONFIG_LGT 0 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOPFILTERING_ACROSS_TILES 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOPFILTER_LEVEL 1 CACHE NUMBER "AV1 experiment flag.")
 set(CONFIG_LOOP_RESTORATION 1 CACHE NUMBER "AV1 experiment flag.")
......
@@ -55,9 +55,6 @@ macro (fix_experiment_configs)
 if (CONFIG_DAALA_TX4 OR CONFIG_DAALA_TX8 OR CONFIG_DAALA_TX16 OR
 CONFIG_DAALA_TX32 OR CONFIG_DAALA_TX64)
-if (CONFIG_LGT)
-change_config_and_warn(CONFIG_LGT 0 CONFIG_DAALA_TXx)
-endif ()