Commit fbf8788d authored by Yaowu Xu, committed by Gerrit Code Review

Merge "Namespace the idct/iad symbols" into nextgenv2

parents 243f87ef f0f98578
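
This merge renames the shared inverse-transform kernels (idct4_c, idct8_c, idct16_c, idct32_c, the iadst4_c/iadst8_c/iadst16_c variants, and their SSE2 counterparts) to carry an aom_ prefix, and updates every caller, including the transform_2d lookup tables in the av1_iht*_add_c functions. A minimal sketch of what a caller looks like after the rename follows; the include path, the demo function, and the zeroed buffers are assumptions for illustration, and only the aom_idct4_c symbol and the tran_low_t type come from the diff itself.

#include "aom_dsp/inv_txfm.h" /* assumed location of the aom_idct4_c() prototype */

/* Illustrative caller only: run the renamed 4-point inverse DCT on each row
 * of a 4x4 block of coefficients. */
static void demo_idct4_rows(void) {
  tran_low_t in[4 * 4] = { 0 }; /* made-up input block */
  tran_low_t out[4 * 4];
  int i;
  for (i = 0; i < 4; ++i) {
    /* before this change the call was idct4_c(in + 4 * i, out + 4 * i); */
    aom_idct4_c(in + 4 * i, out + 4 * i);
  }
  (void)out; /* a real caller would transform the columns next, as in aom_idct4x4_16_add_c */
}
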
@@ -93,7 +93,7 @@ void aom_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) {
}
}
void idct4_c(const tran_low_t *input, tran_low_t *output) {
void aom_idct4_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step[4];
tran_high_t temp1, temp2;
// stage 1
@@ -121,7 +121,7 @@ void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Rows
for (i = 0; i < 4; ++i) {
idct4_c(input, outptr);
aom_idct4_c(input, outptr);
input += 4;
outptr += 4;
}
@@ -129,7 +129,7 @@ void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Columns
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
idct4_c(temp_in, temp_out);
aom_idct4_c(temp_in, temp_out);
for (j = 0; j < 4; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 4));
@@ -154,7 +154,7 @@ void aom_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
}
}
void idct8_c(const tran_low_t *input, tran_low_t *output) {
void aom_idct8_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[8], step2[8];
tran_high_t temp1, temp2;
// stage 1
@@ -216,7 +216,7 @@ void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// First transform rows
for (i = 0; i < 8; ++i) {
idct8_c(input, outptr);
aom_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -224,7 +224,7 @@ void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
idct8_c(temp_in, temp_out);
aom_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -244,7 +244,7 @@ void aom_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
}
}
void iadst4_c(const tran_low_t *input, tran_low_t *output) {
void aom_iadst4_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
tran_low_t x0 = input[0];
@@ -281,7 +281,7 @@ void iadst4_c(const tran_low_t *input, tran_low_t *output) {
output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3));
}
void iadst8_c(const tran_low_t *input, tran_low_t *output) {
void aom_iadst8_c(const tran_low_t *input, tran_low_t *output) {
int s0, s1, s2, s3, s4, s5, s6, s7;
tran_high_t x0 = input[7];
@@ -367,7 +367,7 @@ void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// First transform rows
// only first 4 row has non-zero coefs
for (i = 0; i < 4; ++i) {
idct8_c(input, outptr);
aom_idct8_c(input, outptr);
input += 8;
outptr += 8;
}
@@ -375,7 +375,7 @@ void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
// Then transform columns
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
idct8_c(temp_in, temp_out);
aom_idct8_c(temp_in, temp_out);
for (j = 0; j < 8; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -383,7 +383,7 @@ void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
}
}
void idct16_c(const tran_low_t *input, tran_low_t *output) {
void aom_idct16_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[16], step2[16];
tran_high_t temp1, temp2;
@@ -557,7 +557,7 @@ void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
// First transform rows
for (i = 0; i < 16; ++i) {
idct16_c(input, outptr);
aom_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -565,7 +565,7 @@ void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
idct16_c(temp_in, temp_out);
aom_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -573,7 +573,7 @@ void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
}
}
void iadst16_c(const tran_low_t *input, tran_low_t *output) {
void aom_iadst16_c(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -754,7 +754,7 @@ void aom_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
// First transform rows. Since all non-zero dct coefficients are in
// upper-left 4x4 area, we only need to calculate first 4 rows here.
for (i = 0; i < 4; ++i) {
idct16_c(input, outptr);
aom_idct16_c(input, outptr);
input += 16;
outptr += 16;
}
@@ -762,7 +762,7 @@ void aom_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
// Then transform columns
for (i = 0; i < 16; ++i) {
for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
idct16_c(temp_in, temp_out);
aom_idct16_c(temp_in, temp_out);
for (j = 0; j < 16; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -782,7 +782,7 @@ void aom_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
}
}
void idct32_c(const tran_low_t *input, tran_low_t *output) {
void aom_idct32_c(const tran_low_t *input, tran_low_t *output) {
tran_low_t step1[32], step2[32];
tran_high_t temp1, temp2;
@@ -1168,7 +1168,7 @@ void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
if (zero_coeff[0] | zero_coeff[1])
idct32_c(input, outptr);
aom_idct32_c(input, outptr);
else
memset(outptr, 0, sizeof(tran_low_t) * 32);
input += 32;
@@ -1178,7 +1178,7 @@ void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
aom_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1196,7 +1196,7 @@ void aom_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
// Rows
// only upper-left 16x16 has non-zero coeff
for (i = 0; i < 16; ++i) {
idct32_c(input, outptr);
aom_idct32_c(input, outptr);
input += 32;
outptr += 32;
}
@@ -1204,7 +1204,7 @@ void aom_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
aom_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1222,7 +1222,7 @@ void aom_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
// Rows
// only upper-left 8x8 has non-zero coeff
for (i = 0; i < 8; ++i) {
idct32_c(input, outptr);
aom_idct32_c(input, outptr);
input += 32;
outptr += 32;
}
@@ -1230,7 +1230,7 @@ void aom_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
// Columns
for (i = 0; i < 32; ++i) {
for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
idct32_c(temp_in, temp_out);
aom_idct32_c(temp_in, temp_out);
for (j = 0; j < 32; ++j) {
dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -97,13 +97,13 @@ static INLINE tran_high_t highbd_dct_const_round_shift(tran_high_t input) {
#endif // CONFIG_AOM_HIGHBITDEPTH
#endif // CONFIG_EMULATE_HARDWARE
void idct4_c(const tran_low_t *input, tran_low_t *output);
void idct8_c(const tran_low_t *input, tran_low_t *output);
void idct16_c(const tran_low_t *input, tran_low_t *output);
void idct32_c(const tran_low_t *input, tran_low_t *output);
void iadst4_c(const tran_low_t *input, tran_low_t *output);
void iadst8_c(const tran_low_t *input, tran_low_t *output);
void iadst16_c(const tran_low_t *input, tran_low_t *output);
void aom_idct4_c(const tran_low_t *input, tran_low_t *output);
void aom_idct8_c(const tran_low_t *input, tran_low_t *output);
void aom_idct16_c(const tran_low_t *input, tran_low_t *output);
void aom_idct32_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst4_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst8_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst16_c(const tran_low_t *input, tran_low_t *output);
#if CONFIG_AOM_HIGHBITDEPTH
void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
@@ -171,7 +171,7 @@ void aom_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
RECON_AND_STORE4X4(dest + 3 * stride, dc_value);
}
void idct4_sse2(__m128i *in) {
void aom_idct4_sse2(__m128i *in) {
const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -207,7 +207,7 @@ void idct4_sse2(__m128i *in) {
in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
void iadst4_sse2(__m128i *in) {
void aom_iadst4_sse2(__m128i *in) {
const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -533,7 +533,7 @@ void aom_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
RECON_AND_STORE(dest + 7 * stride, dc_value);
}
void idct8_sse2(__m128i *in) {
void aom_idct8_sse2(__m128i *in) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -558,7 +558,7 @@ void idct8_sse2(__m128i *in) {
in[4], in[5], in[6], in[7]);
}
void iadst8_sse2(__m128i *in) {
void aom_iadst8_sse2(__m128i *in) {
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -2114,13 +2114,13 @@ void idct16_8col(__m128i *in) {
in[15] = _mm_sub_epi16(s[0], s[15]);
}
void idct16_sse2(__m128i *in0, __m128i *in1) {
void aom_idct16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
idct16_8col(in0);
idct16_8col(in1);
}
void iadst16_sse2(__m128i *in0, __m128i *in1) {
void aom_iadst16_sse2(__m128i *in0, __m128i *in1) {
array_transpose_16x16(in0, in1);
iadst16_8col(in0);
iadst16_8col(in1);
@@ -3596,7 +3596,7 @@ void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
if (!test) {
// Do the row transform
idct4_sse2(inptr);
aom_idct4_sse2(inptr);
// Check the min & max values
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3632,7 +3632,7 @@ void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
if (optimised_cols) {
idct4_sse2(inptr);
aom_idct4_sse2(inptr);
// Final round and shift
inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3712,7 +3712,7 @@ void aom_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
if (!test) {
// Do the row transform
idct8_sse2(inptr);
aom_idct8_sse2(inptr);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3749,7 +3749,7 @@ void aom_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
if (optimised_cols) {
idct8_sse2(inptr);
aom_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3813,7 +3813,7 @@ void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
if (!test) {
// Do the row transform
idct8_sse2(inptr);
aom_idct8_sse2(inptr);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -3852,7 +3852,7 @@ void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
if (optimised_cols) {
idct8_sse2(inptr);
aom_idct8_sse2(inptr);
// Final round & shift and Reconstruction and Store
{
@@ -3918,7 +3918,7 @@ void aom_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
if (!test) {
// Do the row transform
idct16_sse2(inptr, inptr + 16);
aom_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3960,7 +3960,7 @@ void aom_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
if (optimised_cols) {
idct16_sse2(inptr, inptr + 16);
aom_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -4033,7 +4033,7 @@ void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
if (!test) {
// Do the row transform (N.B. This transposes inptr)
idct16_sse2(inptr, inptr + 16);
aom_idct16_sse2(inptr, inptr + 16);
// Find the min & max for the column transform
// N.B. Only first 4 cols contain non-zero coeffs
@@ -4078,7 +4078,7 @@ void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
if (optimised_cols) {
idct16_sse2(inptr, inptr + 16);
aom_idct16_sse2(inptr, inptr + 16);
// Final round & shift and Reconstruction and Store
{
@@ -197,12 +197,12 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
void iadst16_8col(__m128i *in);
void idct16_8col(__m128i *in);
void idct4_sse2(__m128i *in);
void idct8_sse2(__m128i *in);
void idct16_sse2(__m128i *in0, __m128i *in1);
void iadst4_sse2(__m128i *in);
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);
void aom_idct4_sse2(__m128i *in);
void aom_idct8_sse2(__m128i *in);
void aom_idct16_sse2(__m128i *in0, __m128i *in1);
void aom_iadst4_sse2(__m128i *in);
void aom_iadst8_sse2(__m128i *in);
void aom_iadst16_sse2(__m128i *in0, __m128i *in1);
void idct32_8col(__m128i *in0, __m128i *in1);
#endif // AOM_DSP_X86_INV_TXFM_SSE2_H_
@@ -70,7 +70,7 @@ static void ihalfright32_c(const tran_low_t *input, tran_low_t *output) {
for (i = 0; i < 16; ++i) {
output[i] = input[16 + i] * 4;
}
idct16_c(inputhalf, output + 16);
aom_idct16_c(inputhalf, output + 16);
// Note overall scaling factor is 4 times orthogonal
}
@@ -241,24 +241,24 @@ static void maybe_flip_strides16(uint16_t **dst, int *dstride, tran_low_t **src,
void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_4[] = {
{ idct4_c, idct4_c }, // DCT_DCT
{ iadst4_c, idct4_c }, // ADST_DCT
{ idct4_c, iadst4_c }, // DCT_ADST
{ iadst4_c, iadst4_c }, // ADST_ADST
{ aom_idct4_c, aom_idct4_c }, // DCT_DCT = 0
{ aom_iadst4_c, aom_idct4_c }, // ADST_DCT = 1
{ aom_idct4_c, aom_iadst4_c }, // DCT_ADST = 2
{ aom_iadst4_c, aom_iadst4_c }, // ADST_ADST = 3
#if CONFIG_EXT_TX
{ iadst4_c, idct4_c }, // FLIPADST_DCT
{ idct4_c, iadst4_c }, // DCT_FLIPADST
{ iadst4_c, iadst4_c }, // FLIPADST_FLIPADST
{ iadst4_c, iadst4_c }, // ADST_FLIPADST
{ iadst4_c, iadst4_c }, // FLIPADST_ADST
{ iidtx4_c, iidtx4_c }, // IDTX
{ idct4_c, iidtx4_c }, // V_DCT
{ iidtx4_c, idct4_c }, // H_DCT
{ iadst4_c, iidtx4_c }, // V_ADST
{ iidtx4_c, iadst4_c }, // H_ADST
{ iadst4_c, iidtx4_c }, // V_FLIPADST
{ iidtx4_c, iadst4_c }, // H_FLIPADST
#endif // CONFIG_EXT_TX
{ aom_iadst4_c, aom_idct4_c }, // FLIPADST_DCT
{ aom_idct4_c, aom_iadst4_c }, // DCT_FLIPADST
{ aom_iadst4_c, aom_iadst4_c }, // FLIPADST_FLIPADST
{ aom_iadst4_c, aom_iadst4_c }, // ADST_FLIPADST
{ aom_iadst4_c, aom_iadst4_c }, // FLIPADST_ADST
{ iidtx4_c, iidtx4_c }, // IDTX
{ aom_idct4_c, iidtx4_c }, // V_DCT
{ iidtx4_c, aom_idct4_c }, // H_DCT
{ aom_iadst4_c, iidtx4_c }, // V_ADST
{ iidtx4_c, aom_iadst4_c }, // H_ADST
{ aom_iadst4_c, iidtx4_c }, // V_FLIPADST
{ iidtx4_c, aom_iadst4_c }, // H_FLIPADST
#endif // CONFIG_EXT_TX
};
int i, j;
@@ -305,22 +305,22 @@ void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_4x8[] = {
{ idct8_c, idct4_c }, // DCT_DCT
{ iadst8_c, idct4_c }, // ADST_DCT
{ idct8_c, iadst4_c }, // DCT_ADST
{ iadst8_c, iadst4_c }, // ADST_ADST
{ iadst8_c, idct4_c }, // FLIPADST_DCT
{ idct8_c, iadst4_c }, // DCT_FLIPADST
{ iadst8_c, iadst4_c }, // FLIPADST_FLIPADST
{ iadst8_c, iadst4_c }, // ADST_FLIPADST
{ iadst8_c, iadst4_c }, // FLIPADST_ADST
{ iidtx8_c, iidtx4_c }, // IDTX
{ idct8_c, iidtx4_c }, // V_DCT
{ iidtx8_c, idct4_c }, // H_DCT
{ iadst8_c, iidtx4_c }, // V_ADST
{ iidtx8_c, iadst4_c }, // H_ADST
{ iadst8_c, iidtx4_c }, // V_FLIPADST
{ iidtx8_c, iadst4_c }, // H_FLIPADST
{ aom_idct8_c, aom_idct4_c }, // DCT_DCT
{ aom_iadst8_c, aom_idct4_c }, // ADST_DCT
{ aom_idct8_c, aom_iadst4_c }, // DCT_ADST
{ aom_iadst8_c, aom_iadst4_c }, // ADST_ADST
{ aom_iadst8_c, aom_idct4_c }, // FLIPADST_DCT
{ aom_idct8_c, aom_iadst4_c }, // DCT_FLIPADST
{ aom_iadst8_c, aom_iadst4_c }, // FLIPADST_FLIPADST
{ aom_iadst8_c, aom_iadst4_c }, // ADST_FLIPADST
{ aom_iadst8_c, aom_iadst4_c }, // FLIPADST_ADST
{ iidtx8_c, iidtx4_c }, // IDTX
{ aom_idct8_c, iidtx4_c }, // V_DCT
{ iidtx8_c, aom_idct4_c }, // H_DCT
{ aom_iadst8_c, iidtx4_c }, // V_ADST
{ iidtx8_c, aom_iadst4_c }, // H_ADST
{ aom_iadst8_c, iidtx4_c }, // V_FLIPADST
{ iidtx8_c, aom_iadst4_c }, // H_FLIPADST
};
const int n = 4;
@@ -358,22 +358,22 @@ void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_8x4[] = {
{ idct4_c, idct8_c }, // DCT_DCT
{ iadst4_c, idct8_c }, // ADST_DCT
{ idct4_c, iadst8_c }, // DCT_ADST
{ iadst4_c, iadst8_c }, // ADST_ADST
{ iadst4_c, idct8_c }, // FLIPADST_DCT
{ idct4_c, iadst8_c }, // DCT_FLIPADST
{ iadst4_c, iadst8_c }, // FLIPADST_FLIPADST
{ iadst4_c, iadst8_c }, // ADST_FLIPADST
{ iadst4_c, iadst8_c }, // FLIPADST_ADST
{ iidtx4_c, iidtx8_c }, // IDTX
{ idct4_c, iidtx8_c }, // V_DCT
{ iidtx4_c, idct8_c }, // H_DCT
{ iadst4_c, iidtx8_c }, // V_ADST
{ iidtx4_c, iadst8_c }, // H_ADST
{ iadst4_c, iidtx8_c }, // V_FLIPADST
{ iidtx4_c, iadst8_c }, // H_FLIPADST
{ aom_idct4_c, aom_idct8_c }, // DCT_DCT
{ aom_iadst4_c, aom_idct8_c }, // ADST_DCT
{ aom_idct4_c, aom_iadst8_c }, // DCT_ADST
{ aom_iadst4_c, aom_iadst8_c }, // ADST_ADST
{ aom_iadst4_c, aom_idct8_c }, // FLIPADST_DCT
{ aom_idct4_c, aom_iadst8_c }, // DCT_FLIPADST
{ aom_iadst4_c, aom_iadst8_c }, // FLIPADST_FLIPADST
{ aom_iadst4_c, aom_iadst8_c }, // ADST_FLIPADST
{ aom_iadst4_c, aom_iadst8_c }, // FLIPADST_ADST
{ iidtx4_c, iidtx8_c }, // IDTX
{ aom_idct4_c, iidtx8_c }, // V_DCT
{ iidtx4_c, aom_idct8_c }, // H_DCT
{ aom_iadst4_c, iidtx8_c }, // V_ADST
{ iidtx4_c, aom_iadst8_c }, // H_ADST
{ aom_iadst4_c, iidtx8_c }, // V_FLIPADST
{ iidtx4_c, aom_iadst8_c }, // H_FLIPADST
};
const int n = 4;
const int n2 = 8;
@@ -411,22 +411,22 @@ void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_8x16[] = {
{ idct16_c, idct8_c }, // DCT_DCT
{ iadst16_c, idct8_c }, // ADST_DCT
{ idct16_c, iadst8_c }, // DCT_ADST
{ iadst16_c, iadst8_c }, // ADST_ADST
{ iadst16_c, idct8_c }, // FLIPADST_DCT
{ idct16_c, iadst8_c }, // DCT_FLIPADST
{ iadst16_c, iadst8_c }, // FLIPADST_FLIPADST
{ iadst16_c, iadst8_c }, // ADST_FLIPADST
{ iadst16_c, iadst8_c }, // FLIPADST_ADST
{ iidtx16_c, iidtx8_c }, // IDTX
{ idct16_c, iidtx8_c }, // V_DCT
{ iidtx16_c, idct8_c }, // H_DCT
{ iadst16_c, iidtx8_c }, // V_ADST
{ iidtx16_c, iadst8_c }, // H_ADST
{ iadst16_c, iidtx8_c }, // V_FLIPADST
{ iidtx16_c, iadst8_c }, // H_FLIPADST
{ aom_idct16_c, aom_idct8_c }, // DCT_DCT
{ aom_iadst16_c, aom_idct8_c }, // ADST_DCT
{ aom_idct16_c, aom_iadst8_c }, // DCT_ADST
{ aom_iadst16_c, aom_iadst8_c }, // ADST_ADST
{ aom_iadst16_c, aom_idct8_c }, // FLIPADST_DCT
{ aom_idct16_c, aom_iadst8_c }, // DCT_FLIPADST
{ aom_iadst16_c, aom_iadst8_c }, // FLIPADST_FLIPADST
{ aom_iadst16_c, aom_iadst8_c }, // ADST_FLIPADST
{ aom_iadst16_c, aom_iadst8_c }, // FLIPADST_ADST
{ iidtx16_c, iidtx8_c }, // IDTX
{ aom_idct16_c, iidtx8_c }, // V_DCT
{ iidtx16_c, aom_idct8_c }, // H_DCT
{ aom_iadst16_c, iidtx8_c }, // V_ADST
{ iidtx16_c, aom_iadst8_c }, // H_ADST
{ aom_iadst16_c, iidtx8_c }, // V_FLIPADST
{ iidtx16_c, aom_iadst8_c }, // H_FLIPADST
};
const int n = 8;
@@ -464,22 +464,22 @@ void av1_iht8x16_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_16x8[] = {
{ idct8_c, idct16_c }, // DCT_DCT
{ iadst8_c, idct16_c }, // ADST_DCT
{ idct8_c, iadst16_c }, // DCT_ADST
{ iadst8_c, iadst16_c }, // ADST_ADST
{ iadst8_c, idct16_c }, // FLIPADST_DCT
{ idct8_c, iadst16_c }, // DCT_FLIPADST
{ iadst8_c, iadst16_c }, // FLIPADST_FLIPADST
{ iadst8_c, iadst16_c }, // ADST_FLIPADST
{ iadst8_c, iadst16_c }, // FLIPADST_ADST
{ iidtx8_c, iidtx16_c }, // IDTX
{ idct8_c, iidtx16_c }, // V_DCT
{ iidtx8_c, idct16_c }, // H_DCT
{ iadst8_c, iidtx16_c }, // V_ADST
{ iidtx8_c, iadst16_c }, // H_ADST
{ iadst8_c, iidtx16_c }, // V_FLIPADST
{ iidtx8_c, iadst16_c }, // H_FLIPADST
{ aom_idct8_c, aom_idct16_c }, // DCT_DCT
{ aom_iadst8_c, aom_idct16_c }, // ADST_DCT
{ aom_idct8_c, aom_iadst16_c }, // DCT_ADST
{ aom_iadst8_c, aom_iadst16_c }, // ADST_ADST
{ aom_iadst8_c, aom_idct16_c }, // FLIPADST_DCT
{ aom_idct8_c, aom_iadst16_c }, // DCT_FLIPADST
{ aom_iadst8_c, aom_iadst16_c }, // FLIPADST_FLIPADST
{ aom_iadst8_c, aom_iadst16_c }, // ADST_FLIPADST
{ aom_iadst8_c, aom_iadst16_c }, // FLIPADST_ADST
{ iidtx8_c, iidtx16_c }, // IDTX
{ aom_idct8_c, iidtx16_c }, // V_DCT
{ iidtx8_c, aom_idct16_c }, // H_DCT
{ aom_iadst8_c, iidtx16_c }, // V_ADST
{ iidtx8_c, aom_iadst16_c }, // H_ADST
{ aom_iadst8_c, iidtx16_c }, // V_FLIPADST
{ iidtx8_c, aom_iadst16_c }, // H_FLIPADST
};
const int n = 8;
const int n2 = 16;
@@ -517,22 +517,22 @@ void av1_iht16x8_128_add_c(const tran_low_t *input, uint8_t *dest, int stride,
void av1_iht16x32_512_add_c(const tran_low_t *input, uint8_t *dest, int stride,
int tx_type) {
static const transform_2d IHT_16x32[] = {
{ idct32_c, idct16_c }, // DCT_DCT
{ ihalfright32_c, idct16_c }, // ADST_DCT
{ idct32_c, iadst16_c }, // DCT_ADST
{ ihalfright32_c, iadst16_c }, // ADST_ADST
{ ihalfright32_c, idct16_c }, // FLIPADST_DCT
{ idct32_c, iadst16_c }, // DCT_FLIPADST
{ ihalfright32_c, iadst16_c }, // FLIPADST_FLIPADST
{ ihalfright32_c, iadst16_c }, // ADST_FLIPADST
{ ihalfright32_c, iadst16_c }, // FLIPADST_ADST
{ iidtx32_c, iidtx16_c }, // IDTX
{ idct32_c, iidtx16_c }, // V_DCT
{ iidtx32_c, idct16_c }, // H_DCT
{ ihalfright32_c, iidtx16_c }, // V_ADST
{ iidtx32_c, iadst16_c }, // H_ADST
{ ihalfright32_c, iidtx16_c }, // V_FLIPADST
{ iidtx32_c, iadst16_c }, // H_FLIPADST
{ aom_idct32_c, aom_idct16_c }, // DCT_DCT
{ ihalfright32_c, aom_idct16_c }, // ADST_DCT
{ aom_idct32_c, aom_iadst16_c }, // DCT_ADST
{ ihalfright32_c, aom_iadst16_c }, // ADST_ADST
{ ihalfright32_c, aom_idct16_c }, // FLIPADST_DCT
{ aom_idct32_c, aom_iadst16_c }, // DCT_FLIPADST
{ ihalfright32_c, aom_iadst16_c }, // FLIPADST_FLIPADST
{ ihalfright32_c, aom_iadst16_c }, // ADST_FLIPADST
{ ihalfright32_c, aom_iadst16_c }, // FLIPADST_ADST
{ iidtx32_c,