diff --git a/test/vp10_fwd_txfm1d_test.cc b/test/vp10_fwd_txfm1d_test.cc index a39e0ef1ff66bd1277f52a0b8e54ed864e23bc5f..bcbc6178e827c03f264009cf330f01495d0d8b88 100644 --- a/test/vp10_fwd_txfm1d_test.cc +++ b/test/vp10_fwd_txfm1d_test.cc @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "test/vp10_txfm_test.h" #include "vp10/common/vp10_fwd_txfm1d.h" +#include "test/vp10_txfm_test.h" using libvpx_test::ACMRandom; @@ -17,12 +17,14 @@ namespace { static int txfm_type_num = 2; static TYPE_TXFM txfm_type_ls[2] = {TYPE_DCT, TYPE_ADST}; -static int txfm_size_num = 4; -static int txfm_size_ls[4] = {4, 8, 16, 32}; +static int txfm_size_num = 5; +static int txfm_size_ls[5] = {4, 8, 16, 32, 64}; -static TxfmFunc fwd_txfm_func_ls[2][4] = { - {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new}, - {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new}}; +static TxfmFunc fwd_txfm_func_ls[2][5] = { + {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new, + vp10_fdct64_new}, + {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, + NULL}}; // the maximum stage number of fwd/inv 1d dct/adst txfm is 12 static int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}; @@ -104,19 +106,21 @@ TEST(vp10_fwd_txfm1d, accuracy) { int max_error = 7; const int count_test_block = 5000; - for (int ti = 0; ti < count_test_block; ++ti) { - for (int ni = 0; ni < txfm_size; ++ni) { - input[ni] = rnd.Rand16() % base - rnd.Rand16() % base; - ref_input[ni] = static_cast<double>(input[ni]); - } - - fwd_txfm_func(input, output, cos_bit, range_bit); - reference_hybrid_1d(ref_input, ref_output, txfm_size, txfm_type); - - for (int ni = 0; ni < txfm_size; ++ni) { - EXPECT_LE( - abs(output[ni] - static_cast<int32_t>(round(ref_output[ni]))), - max_error); + if (fwd_txfm_func != NULL) { + for (int ti = 0; ti < count_test_block; ++ti) { + for (int ni = 0; ni < txfm_size; ++ni) { + input[ni] = rnd.Rand16() % base - rnd.Rand16() % base; + ref_input[ni] = static_cast<double>(input[ni]); + } + + fwd_txfm_func(input, output, cos_bit, range_bit); + reference_hybrid_1d(ref_input, ref_output, txfm_size, txfm_type); + + for (int ni = 0; ni < txfm_size; ++ni) { + EXPECT_LE( + abs(output[ni] - static_cast<int32_t>(round(ref_output[ni]))), + max_error); + } } } } diff --git a/test/vp10_fwd_txfm2d_test.cc b/test/vp10_fwd_txfm2d_test.cc index e6416ccf4ce9930dd67a630bf5d49967d47afd71..d4115c95704b39a6f4cd4635b8760a14cf2537b0 100644 --- a/test/vp10_fwd_txfm2d_test.cc +++ b/test/vp10_fwd_txfm2d_test.cc @@ -8,9 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include <math.h> #include <stdio.h> #include <stdlib.h> -#include <math.h> #include "third_party/googletest/src/include/gtest/gtest.h" @@ -23,21 +23,22 @@ using libvpx_test::ACMRandom; namespace { -const int txfm_size_num = 4; -const int txfm_size_ls[4] = {4, 8, 16, 32}; -const TXFM_2D_CFG fwd_txfm_cfg_ls[4][4] = { - {fwd_txfm_2d_cfg_dct_dct_4, fwd_txfm_2d_cfg_dct_adst_4, - fwd_txfm_2d_cfg_adst_adst_4, fwd_txfm_2d_cfg_adst_dct_4}, - {fwd_txfm_2d_cfg_dct_dct_8, fwd_txfm_2d_cfg_dct_adst_8, - fwd_txfm_2d_cfg_adst_adst_8, fwd_txfm_2d_cfg_adst_dct_8}, - {fwd_txfm_2d_cfg_dct_dct_16, fwd_txfm_2d_cfg_dct_adst_16, - fwd_txfm_2d_cfg_adst_adst_16, fwd_txfm_2d_cfg_adst_dct_16}, - {fwd_txfm_2d_cfg_dct_dct_32, fwd_txfm_2d_cfg_dct_adst_32, - fwd_txfm_2d_cfg_adst_adst_32, fwd_txfm_2d_cfg_adst_dct_32}}; - -const Fwd_Txfm2d_Func fwd_txfm_func_ls[4] = { +const int txfm_size_num = 5; +const int txfm_size_ls[5] = {4, 8, 16, 32, 64}; +const TXFM_2D_CFG* fwd_txfm_cfg_ls[5][4] = { + {&fwd_txfm_2d_cfg_dct_dct_4, &fwd_txfm_2d_cfg_dct_adst_4, + &fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_dct_4}, + {&fwd_txfm_2d_cfg_dct_dct_8, &fwd_txfm_2d_cfg_dct_adst_8, + &fwd_txfm_2d_cfg_adst_adst_8, &fwd_txfm_2d_cfg_adst_dct_8}, + {&fwd_txfm_2d_cfg_dct_dct_16, &fwd_txfm_2d_cfg_dct_adst_16, + &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_dct_16}, + {&fwd_txfm_2d_cfg_dct_dct_32, &fwd_txfm_2d_cfg_dct_adst_32, + &fwd_txfm_2d_cfg_adst_adst_32, &fwd_txfm_2d_cfg_adst_dct_32}, + {&fwd_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}}; + +const Fwd_Txfm2d_Func fwd_txfm_func_ls[5] = { vp10_fwd_txfm2d_4x4, vp10_fwd_txfm2d_8x8, vp10_fwd_txfm2d_16x16, - vp10_fwd_txfm2d_32x32}; + vp10_fwd_txfm2d_32x32, vp10_fwd_txfm2d_64x64}; const int txfm_type_num = 4; const TYPE_TXFM type_ls_0[4] = {TYPE_DCT, TYPE_DCT, TYPE_ADST, TYPE_ADST}; @@ -54,44 +55,48 @@ TEST(vp10_fwd_txfm2d, accuracy) { for (int txfm_type_idx = 0; txfm_type_idx < txfm_type_num; ++txfm_type_idx) { - TXFM_2D_CFG fwd_txfm_cfg = fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx]; - Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx]; - TYPE_TXFM type0 = type_ls_0[txfm_type_idx]; - TYPE_TXFM type1 = type_ls_1[txfm_type_idx]; - int amplify_bit = - fwd_txfm_cfg.shift[0] + fwd_txfm_cfg.shift[1] + fwd_txfm_cfg.shift[2]; - double amplify_factor = - amplify_bit >= 0 ? (1 << amplify_bit) : (1.0 / (1 << -amplify_bit)); - - ACMRandom rnd(ACMRandom::DeterministicSeed()); - int count = 5000; - double avg_abs_error = 0; - for (int ci = 0; ci < count; ci++) { - for (int ni = 0; ni < sqr_txfm_size; ++ni) { - input[ni] = rnd.Rand16() % base; - ref_input[ni] = static_cast<double>(input[ni]); - output[ni] = 0; - ref_output[ni] = 0; + const TXFM_2D_CFG* fwd_txfm_cfg = + fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx]; + if (fwd_txfm_cfg != NULL) { + Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx]; + TYPE_TXFM type0 = type_ls_0[txfm_type_idx]; + TYPE_TXFM type1 = type_ls_1[txfm_type_idx]; + int amplify_bit = fwd_txfm_cfg->shift[0] + fwd_txfm_cfg->shift[1] + + fwd_txfm_cfg->shift[2]; + double amplify_factor = + amplify_bit >= 0 ? 
(1 << amplify_bit) : (1.0 / (1 << -amplify_bit)); + + ACMRandom rnd(ACMRandom::DeterministicSeed()); + int count = 500; + double avg_abs_error = 0; + for (int ci = 0; ci < count; ci++) { + for (int ni = 0; ni < sqr_txfm_size; ++ni) { + input[ni] = rnd.Rand16() % base; + ref_input[ni] = static_cast<double>(input[ni]); + output[ni] = 0; + ref_output[ni] = 0; + } + + fwd_txfm_func(input, output, txfm_size, fwd_txfm_cfg, bd); + reference_hybrid_2d(ref_input, ref_output, txfm_size, type0, type1); + + for (int ni = 0; ni < sqr_txfm_size; ++ni) { + ref_output[ni] = round(ref_output[ni] * amplify_factor); + EXPECT_LE(fabs(output[ni] - ref_output[ni]) / amplify_factor, 60); + } + avg_abs_error += compute_avg_abs_error<int32_t, double>( + output, ref_output, sqr_txfm_size); } - fwd_txfm_func(input, output, txfm_size, &fwd_txfm_cfg, bd); - reference_hybrid_2d(ref_input, ref_output, txfm_size, type0, type1); - - for (int ni = 0; ni < sqr_txfm_size; ++ni) { - ref_output[ni] = round(ref_output[ni] * amplify_factor); - EXPECT_LE(fabs(output[ni] - ref_output[ni]) / amplify_factor, 30); - } - avg_abs_error += compute_avg_abs_error<int32_t, double>( - output, ref_output, sqr_txfm_size); + avg_abs_error /= amplify_factor; + avg_abs_error /= count; + // max_abs_avg_error comes from upper bound of avg_abs_error + // printf("type0: %d type1: %d txfm_size: %d accuracy_avg_abs_error: + // %f\n", + // type0, type1, txfm_size, avg_abs_error); + double max_abs_avg_error = 5; + EXPECT_LE(avg_abs_error, max_abs_avg_error); } - - avg_abs_error /= amplify_factor; - avg_abs_error /= count; - // max_abs_avg_error comes from upper bound of avg_abs_error - // printf("type0: %d type1: %d txfm_size: %d accuracy_avg_abs_error: - // %f\n", type0, type1, txfm_size, avg_abs_error); - double max_abs_avg_error = 1.5; - EXPECT_LE(avg_abs_error, max_abs_avg_error); } delete[] input; diff --git a/test/vp10_inv_txfm1d_test.cc b/test/vp10_inv_txfm1d_test.cc index 3b716c8d1c2844c36c49726ff4e986dcf029f89c..2e9e58d439b341dd8932645806ff9669c7fddef6 100644 --- a/test/vp10_inv_txfm1d_test.cc +++ b/test/vp10_inv_txfm1d_test.cc @@ -16,16 +16,20 @@ using libvpx_test::ACMRandom; namespace { static int txfm_type_num = 2; -static int txfm_size_num = 4; -static int txfm_size_ls[4] = {4, 8, 16, 32}; +static int txfm_size_num = 5; +static int txfm_size_ls[5] = {4, 8, 16, 32, 64}; -static TxfmFunc fwd_txfm_func_ls[2][4] = { - {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new}, - {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new}}; +static TxfmFunc fwd_txfm_func_ls[2][5] = { + {vp10_fdct4_new, vp10_fdct8_new, vp10_fdct16_new, vp10_fdct32_new, + vp10_fdct64_new}, + {vp10_fadst4_new, vp10_fadst8_new, vp10_fadst16_new, vp10_fadst32_new, + NULL}}; -static TxfmFunc inv_txfm_func_ls[2][4] = { - {vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new}, - {vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new}}; +static TxfmFunc inv_txfm_func_ls[2][5] = { + {vp10_idct4_new, vp10_idct8_new, vp10_idct16_new, vp10_idct32_new, + vp10_idct64_new}, + {vp10_iadst4_new, vp10_iadst8_new, vp10_iadst16_new, vp10_iadst32_new, + NULL}}; // the maximum stage number of fwd/inv 1d dct/adst txfm is 12 static int8_t cos_bit[12] = {14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}; @@ -44,19 +48,22 @@ TEST(vp10_inv_txfm1d, round_trip) { TxfmFunc inv_txfm_func = inv_txfm_func_ls[ti][si]; int max_error = 2; - const int count_test_block = 5000; - for (int ci = 0; ci < count_test_block; ++ci) { - for (int ni = 0; ni < 
txfm_size; ++ni) { - input[ni] = rnd.Rand16() % base - rnd.Rand16() % base; - } + if (fwd_txfm_func != NULL) { + const int count_test_block = 5000; + for (int ci = 0; ci < count_test_block; ++ci) { + for (int ni = 0; ni < txfm_size; ++ni) { + input[ni] = rnd.Rand16() % base - rnd.Rand16() % base; + } - fwd_txfm_func(input, output, cos_bit, range_bit); - inv_txfm_func(output, round_trip_output, cos_bit, range_bit); + fwd_txfm_func(input, output, cos_bit, range_bit); + inv_txfm_func(output, round_trip_output, cos_bit, range_bit); - for (int ni = 0; ni < txfm_size; ++ni) { - EXPECT_LE(abs(input[ni] - round_shift(round_trip_output[ni], - get_max_bit(txfm_size) - 1)), - max_error); + for (int ni = 0; ni < txfm_size; ++ni) { + int node_err = + abs(input[ni] - round_shift(round_trip_output[ni], + get_max_bit(txfm_size) - 1)); + EXPECT_LE(node_err, max_error); + } } } } diff --git a/test/vp10_inv_txfm2d_test.cc b/test/vp10_inv_txfm2d_test.cc index 603821ec9b831abf9819f5fd4cb0bb3a3af16e59..b0e6af5fa4661b5e4056de9c44a6461972e67954 100644 --- a/test/vp10_inv_txfm2d_test.cc +++ b/test/vp10_inv_txfm2d_test.cc @@ -25,34 +25,36 @@ using libvpx_test::ACMRandom; namespace { -const int txfm_size_num = 4; -const int txfm_size_ls[4] = {4, 8, 16, 32}; -const TXFM_2D_CFG fwd_txfm_cfg_ls[4][4] = { - {fwd_txfm_2d_cfg_dct_dct_4, fwd_txfm_2d_cfg_dct_adst_4, - fwd_txfm_2d_cfg_adst_adst_4, fwd_txfm_2d_cfg_adst_dct_4}, - {fwd_txfm_2d_cfg_dct_dct_8, fwd_txfm_2d_cfg_dct_adst_8, - fwd_txfm_2d_cfg_adst_adst_8, fwd_txfm_2d_cfg_adst_dct_8}, - {fwd_txfm_2d_cfg_dct_dct_16, fwd_txfm_2d_cfg_dct_adst_16, - fwd_txfm_2d_cfg_adst_adst_16, fwd_txfm_2d_cfg_adst_dct_16}, - {fwd_txfm_2d_cfg_dct_dct_32, fwd_txfm_2d_cfg_dct_adst_32, - fwd_txfm_2d_cfg_adst_adst_32, fwd_txfm_2d_cfg_adst_dct_32}}; - -const TXFM_2D_CFG inv_txfm_cfg_ls[4][4] = { - {inv_txfm_2d_cfg_dct_dct_4, inv_txfm_2d_cfg_dct_adst_4, - inv_txfm_2d_cfg_adst_adst_4, inv_txfm_2d_cfg_adst_dct_4}, - {inv_txfm_2d_cfg_dct_dct_8, inv_txfm_2d_cfg_dct_adst_8, - inv_txfm_2d_cfg_adst_adst_8, inv_txfm_2d_cfg_adst_dct_8}, - {inv_txfm_2d_cfg_dct_dct_16, inv_txfm_2d_cfg_dct_adst_16, - inv_txfm_2d_cfg_adst_adst_16, inv_txfm_2d_cfg_adst_dct_16}, - {inv_txfm_2d_cfg_dct_dct_32, inv_txfm_2d_cfg_dct_adst_32, - inv_txfm_2d_cfg_adst_adst_32, inv_txfm_2d_cfg_adst_dct_32}}; - -const Fwd_Txfm2d_Func fwd_txfm_func_ls[4] = { +const int txfm_size_num = 5; +const int txfm_size_ls[5] = {4, 8, 16, 32, 64}; +const TXFM_2D_CFG* fwd_txfm_cfg_ls[5][4] = { + {&fwd_txfm_2d_cfg_dct_dct_4, &fwd_txfm_2d_cfg_dct_adst_4, + &fwd_txfm_2d_cfg_adst_adst_4, &fwd_txfm_2d_cfg_adst_dct_4}, + {&fwd_txfm_2d_cfg_dct_dct_8, &fwd_txfm_2d_cfg_dct_adst_8, + &fwd_txfm_2d_cfg_adst_adst_8, &fwd_txfm_2d_cfg_adst_dct_8}, + {&fwd_txfm_2d_cfg_dct_dct_16, &fwd_txfm_2d_cfg_dct_adst_16, + &fwd_txfm_2d_cfg_adst_adst_16, &fwd_txfm_2d_cfg_adst_dct_16}, + {&fwd_txfm_2d_cfg_dct_dct_32, &fwd_txfm_2d_cfg_dct_adst_32, + &fwd_txfm_2d_cfg_adst_adst_32, &fwd_txfm_2d_cfg_adst_dct_32}, + {&fwd_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}}; + +const TXFM_2D_CFG* inv_txfm_cfg_ls[5][4] = { + {&inv_txfm_2d_cfg_dct_dct_4, &inv_txfm_2d_cfg_dct_adst_4, + &inv_txfm_2d_cfg_adst_adst_4, &inv_txfm_2d_cfg_adst_dct_4}, + {&inv_txfm_2d_cfg_dct_dct_8, &inv_txfm_2d_cfg_dct_adst_8, + &inv_txfm_2d_cfg_adst_adst_8, &inv_txfm_2d_cfg_adst_dct_8}, + {&inv_txfm_2d_cfg_dct_dct_16, &inv_txfm_2d_cfg_dct_adst_16, + &inv_txfm_2d_cfg_adst_adst_16, &inv_txfm_2d_cfg_adst_dct_16}, + {&inv_txfm_2d_cfg_dct_dct_32, &inv_txfm_2d_cfg_dct_adst_32, + &inv_txfm_2d_cfg_adst_adst_32, 
&inv_txfm_2d_cfg_adst_dct_32}, + {&inv_txfm_2d_cfg_dct_dct_64, NULL, NULL, NULL}}; + +const Fwd_Txfm2d_Func fwd_txfm_func_ls[5] = { vp10_fwd_txfm2d_4x4, vp10_fwd_txfm2d_8x8, vp10_fwd_txfm2d_16x16, - vp10_fwd_txfm2d_32x32}; -const Inv_Txfm2d_Func inv_txfm_func_ls[4] = { + vp10_fwd_txfm2d_32x32, vp10_fwd_txfm2d_64x64}; +const Inv_Txfm2d_Func inv_txfm_func_ls[5] = { vp10_inv_txfm2d_add_4x4, vp10_inv_txfm2d_add_8x8, vp10_inv_txfm2d_add_16x16, - vp10_inv_txfm2d_add_32x32}; + vp10_inv_txfm2d_add_32x32, vp10_inv_txfm2d_add_64x64}; const int txfm_type_num = 4; @@ -66,44 +68,46 @@ TEST(vp10_inv_txfm2d, round_trip) { for (int txfm_type_idx = 0; txfm_type_idx < txfm_type_num; ++txfm_type_idx) { - const TXFM_2D_CFG fwd_txfm_cfg = + const TXFM_2D_CFG* fwd_txfm_cfg = fwd_txfm_cfg_ls[txfm_size_idx][txfm_type_idx]; - const TXFM_2D_CFG inv_txfm_cfg = + const TXFM_2D_CFG* inv_txfm_cfg = inv_txfm_cfg_ls[txfm_size_idx][txfm_type_idx]; - const Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx]; - const Inv_Txfm2d_Func inv_txfm_func = inv_txfm_func_ls[txfm_size_idx]; - const int count = 5000; - double avg_abs_error = 0; - ACMRandom rnd(ACMRandom::DeterministicSeed()); - for (int ci = 0; ci < count; ci++) { - for (int ni = 0; ni < sqr_txfm_size; ++ni) { - if (ci == 0) { - int extreme_input = base - 1; - input[ni] = extreme_input; // extreme case - ref_input[ni] = 0; - } else { - input[ni] = rnd.Rand16() % base; - ref_input[ni] = 0; + if (fwd_txfm_cfg != NULL) { + const Fwd_Txfm2d_Func fwd_txfm_func = fwd_txfm_func_ls[txfm_size_idx]; + const Inv_Txfm2d_Func inv_txfm_func = inv_txfm_func_ls[txfm_size_idx]; + const int count = 1000; + double avg_abs_error = 0; + ACMRandom rnd(ACMRandom::DeterministicSeed()); + for (int ci = 0; ci < count; ci++) { + for (int ni = 0; ni < sqr_txfm_size; ++ni) { + if (ci == 0) { + int extreme_input = base - 1; + input[ni] = extreme_input; // extreme case + ref_input[ni] = 0; + } else { + input[ni] = rnd.Rand16() % base; + ref_input[ni] = 0; + } } - } - fwd_txfm_func(input, output, txfm_size, &fwd_txfm_cfg, bd); - inv_txfm_func(output, ref_input, txfm_size, &inv_txfm_cfg, bd); + fwd_txfm_func(input, output, txfm_size, fwd_txfm_cfg, bd); + inv_txfm_func(output, ref_input, txfm_size, inv_txfm_cfg, bd); - for (int ni = 0; ni < sqr_txfm_size; ++ni) { - EXPECT_LE(abs(input[ni] - ref_input[ni]), 2); + for (int ni = 0; ni < sqr_txfm_size; ++ni) { + EXPECT_LE(abs(input[ni] - ref_input[ni]), 2); + } + avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>( + input, ref_input, sqr_txfm_size); } - avg_abs_error += compute_avg_abs_error<int16_t, uint16_t>( - input, ref_input, sqr_txfm_size); - } - avg_abs_error /= count; - // max_abs_avg_error comes from upper bound of - // printf("txfm_size: %d accuracy_avg_abs_error: %f\n", txfm_size, - // avg_abs_error); - // TODO(angiebird): this upper bound is from adst_adst_8 - const double max_abs_avg_error = 0.024; - EXPECT_LE(avg_abs_error, max_abs_avg_error); + avg_abs_error /= count; + // max_abs_avg_error comes from upper bound of + // printf("txfm_size: %d accuracy_avg_abs_error: %f\n", + // txfm_size, avg_abs_error); + // TODO(angiebird): this upper bound is from adst_adst_8 + const double max_abs_avg_error = 0.024; + EXPECT_LE(avg_abs_error, max_abs_avg_error); + } } delete[] input; diff --git a/vp10/common/vp10_fwd_txfm1d.c b/vp10/common/vp10_fwd_txfm1d.c index f3da5c941f8b52acad574c94f21985c281daac66..ef24362377e3689ed517bc0b0cfa05df591d98d1 100644 --- a/vp10/common/vp10_fwd_txfm1d.c +++ b/vp10/common/vp10_fwd_txfm1d.c @@ -15,8 
+15,8 @@ { \ int i, j; \ for (i = 0; i < size; ++i) { \ - int buf_bit = get_max_bit(abs(buf[i])) + 1; \ - if (buf_bit > bit) { \ + int buf_bit = get_max_bit(abs(buf[i])) + 1; \ + if (buf_bit > bit) { \ printf("======== %s overflow ========\n", __func__); \ printf("stage: %d node: %d\n", stage, i); \ printf("bit: %d buf_bit: %d buf[i]: %d\n", bit, buf_bit, buf[i]); \ @@ -32,11 +32,11 @@ #else #define range_check(stage, input, buf, size, bit) \ { \ - (void) stage; \ - (void) input; \ - (void) buf; \ - (void) size; \ - (void) bit; \ + (void)stage; \ + (void)input; \ + (void)buf; \ + (void)size; \ + (void)bit; \ } #endif @@ -1092,7 +1092,6 @@ void vp10_fadst16_new(const int32_t *input, int32_t *output, bf1[14] = bf0[9]; bf1[15] = -bf0[1]; range_check(stage, input, bf1, size, stage_range[stage]); - } void vp10_fadst32_new(const int32_t *input, int32_t *output, @@ -1529,3 +1528,796 @@ void vp10_fadst32_new(const int32_t *input, int32_t *output, bf1[31] = -bf0[1]; range_check(stage, input, bf1, size, stage_range[stage]); } + +void vp10_fdct64_new(const int32_t *input, int32_t *output, + const int8_t *cos_bit, const int8_t *stage_range) { + const int32_t size = 64; + const int32_t *cospi; + + int32_t stage = 0; + int32_t *bf0, *bf1; + int32_t step[64]; + + // stage 0; + range_check(stage, input, input, size, stage_range[stage]); + + // stage 1; + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf1 = output; + bf1[0] = input[0] + input[63]; + bf1[1] = input[1] + input[62]; + bf1[2] = input[2] + input[61]; + bf1[3] = input[3] + input[60]; + bf1[4] = input[4] + input[59]; + bf1[5] = input[5] + input[58]; + bf1[6] = input[6] + input[57]; + bf1[7] = input[7] + input[56]; + bf1[8] = input[8] + input[55]; + bf1[9] = input[9] + input[54]; + bf1[10] = input[10] + input[53]; + bf1[11] = input[11] + input[52]; + bf1[12] = input[12] + input[51]; + bf1[13] = input[13] + input[50]; + bf1[14] = input[14] + input[49]; + bf1[15] = input[15] + input[48]; + bf1[16] = input[16] + input[47]; + bf1[17] = input[17] + input[46]; + bf1[18] = input[18] + input[45]; + bf1[19] = input[19] + input[44]; + bf1[20] = input[20] + input[43]; + bf1[21] = input[21] + input[42]; + bf1[22] = input[22] + input[41]; + bf1[23] = input[23] + input[40]; + bf1[24] = input[24] + input[39]; + bf1[25] = input[25] + input[38]; + bf1[26] = input[26] + input[37]; + bf1[27] = input[27] + input[36]; + bf1[28] = input[28] + input[35]; + bf1[29] = input[29] + input[34]; + bf1[30] = input[30] + input[33]; + bf1[31] = input[31] + input[32]; + bf1[32] = -input[32] + input[31]; + bf1[33] = -input[33] + input[30]; + bf1[34] = -input[34] + input[29]; + bf1[35] = -input[35] + input[28]; + bf1[36] = -input[36] + input[27]; + bf1[37] = -input[37] + input[26]; + bf1[38] = -input[38] + input[25]; + bf1[39] = -input[39] + input[24]; + bf1[40] = -input[40] + input[23]; + bf1[41] = -input[41] + input[22]; + bf1[42] = -input[42] + input[21]; + bf1[43] = -input[43] + input[20]; + bf1[44] = -input[44] + input[19]; + bf1[45] = -input[45] + input[18]; + bf1[46] = -input[46] + input[17]; + bf1[47] = -input[47] + input[16]; + bf1[48] = -input[48] + input[15]; + bf1[49] = -input[49] + input[14]; + bf1[50] = -input[50] + input[13]; + bf1[51] = -input[51] + input[12]; + bf1[52] = -input[52] + input[11]; + bf1[53] = -input[53] + input[10]; + bf1[54] = -input[54] + input[9]; + bf1[55] = -input[55] + input[8]; + bf1[56] = -input[56] + input[7]; + bf1[57] = -input[57] + input[6]; + bf1[58] = -input[58] + input[5]; + bf1[59] = -input[59] + input[4]; + bf1[60] = 
-input[60] + input[3]; + bf1[61] = -input[61] + input[2]; + bf1[62] = -input[62] + input[1]; + bf1[63] = -input[63] + input[0]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 2 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = output; + bf1 = step; + bf1[0] = bf0[0] + bf0[31]; + bf1[1] = bf0[1] + bf0[30]; + bf1[2] = bf0[2] + bf0[29]; + bf1[3] = bf0[3] + bf0[28]; + bf1[4] = bf0[4] + bf0[27]; + bf1[5] = bf0[5] + bf0[26]; + bf1[6] = bf0[6] + bf0[25]; + bf1[7] = bf0[7] + bf0[24]; + bf1[8] = bf0[8] + bf0[23]; + bf1[9] = bf0[9] + bf0[22]; + bf1[10] = bf0[10] + bf0[21]; + bf1[11] = bf0[11] + bf0[20]; + bf1[12] = bf0[12] + bf0[19]; + bf1[13] = bf0[13] + bf0[18]; + bf1[14] = bf0[14] + bf0[17]; + bf1[15] = bf0[15] + bf0[16]; + bf1[16] = -bf0[16] + bf0[15]; + bf1[17] = -bf0[17] + bf0[14]; + bf1[18] = -bf0[18] + bf0[13]; + bf1[19] = -bf0[19] + bf0[12]; + bf1[20] = -bf0[20] + bf0[11]; + bf1[21] = -bf0[21] + bf0[10]; + bf1[22] = -bf0[22] + bf0[9]; + bf1[23] = -bf0[23] + bf0[8]; + bf1[24] = -bf0[24] + bf0[7]; + bf1[25] = -bf0[25] + bf0[6]; + bf1[26] = -bf0[26] + bf0[5]; + bf1[27] = -bf0[27] + bf0[4]; + bf1[28] = -bf0[28] + bf0[3]; + bf1[29] = -bf0[29] + bf0[2]; + bf1[30] = -bf0[30] + bf0[1]; + bf1[31] = -bf0[31] + bf0[0]; + bf1[32] = bf0[32]; + bf1[33] = bf0[33]; + bf1[34] = bf0[34]; + bf1[35] = bf0[35]; + bf1[36] = bf0[36]; + bf1[37] = bf0[37]; + bf1[38] = bf0[38]; + bf1[39] = bf0[39]; + bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]); + bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]); + bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]); + bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]); + bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]); + bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]); + bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]); + bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]); + bf1[48] = half_btf(cospi[32], bf0[48], cospi[32], bf0[47], cos_bit[stage]); + bf1[49] = half_btf(cospi[32], bf0[49], cospi[32], bf0[46], cos_bit[stage]); + bf1[50] = half_btf(cospi[32], bf0[50], cospi[32], bf0[45], cos_bit[stage]); + bf1[51] = half_btf(cospi[32], bf0[51], cospi[32], bf0[44], cos_bit[stage]); + bf1[52] = half_btf(cospi[32], bf0[52], cospi[32], bf0[43], cos_bit[stage]); + bf1[53] = half_btf(cospi[32], bf0[53], cospi[32], bf0[42], cos_bit[stage]); + bf1[54] = half_btf(cospi[32], bf0[54], cospi[32], bf0[41], cos_bit[stage]); + bf1[55] = half_btf(cospi[32], bf0[55], cospi[32], bf0[40], cos_bit[stage]); + bf1[56] = bf0[56]; + bf1[57] = bf0[57]; + bf1[58] = bf0[58]; + bf1[59] = bf0[59]; + bf1[60] = bf0[60]; + bf1[61] = bf0[61]; + bf1[62] = bf0[62]; + bf1[63] = bf0[63]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 3 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = step; + bf1 = output; + bf1[0] = bf0[0] + bf0[15]; + bf1[1] = bf0[1] + bf0[14]; + bf1[2] = bf0[2] + bf0[13]; + bf1[3] = bf0[3] + bf0[12]; + bf1[4] = bf0[4] + bf0[11]; + bf1[5] = bf0[5] + bf0[10]; + bf1[6] = bf0[6] + bf0[9]; + bf1[7] = bf0[7] + bf0[8]; + bf1[8] = -bf0[8] + bf0[7]; + bf1[9] = -bf0[9] + bf0[6]; + bf1[10] = -bf0[10] + bf0[5]; + bf1[11] = -bf0[11] + bf0[4]; + bf1[12] = -bf0[12] + bf0[3]; + bf1[13] = -bf0[13] + bf0[2]; + bf1[14] = -bf0[14] + bf0[1]; + bf1[15] = -bf0[15] + bf0[0]; + bf1[16] = bf0[16]; + bf1[17] = bf0[17]; + 
bf1[18] = bf0[18]; + bf1[19] = bf0[19]; + bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]); + bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]); + bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]); + bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]); + bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit[stage]); + bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit[stage]); + bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit[stage]); + bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit[stage]); + bf1[28] = bf0[28]; + bf1[29] = bf0[29]; + bf1[30] = bf0[30]; + bf1[31] = bf0[31]; + bf1[32] = bf0[32] + bf0[47]; + bf1[33] = bf0[33] + bf0[46]; + bf1[34] = bf0[34] + bf0[45]; + bf1[35] = bf0[35] + bf0[44]; + bf1[36] = bf0[36] + bf0[43]; + bf1[37] = bf0[37] + bf0[42]; + bf1[38] = bf0[38] + bf0[41]; + bf1[39] = bf0[39] + bf0[40]; + bf1[40] = -bf0[40] + bf0[39]; + bf1[41] = -bf0[41] + bf0[38]; + bf1[42] = -bf0[42] + bf0[37]; + bf1[43] = -bf0[43] + bf0[36]; + bf1[44] = -bf0[44] + bf0[35]; + bf1[45] = -bf0[45] + bf0[34]; + bf1[46] = -bf0[46] + bf0[33]; + bf1[47] = -bf0[47] + bf0[32]; + bf1[48] = -bf0[48] + bf0[63]; + bf1[49] = -bf0[49] + bf0[62]; + bf1[50] = -bf0[50] + bf0[61]; + bf1[51] = -bf0[51] + bf0[60]; + bf1[52] = -bf0[52] + bf0[59]; + bf1[53] = -bf0[53] + bf0[58]; + bf1[54] = -bf0[54] + bf0[57]; + bf1[55] = -bf0[55] + bf0[56]; + bf1[56] = bf0[56] + bf0[55]; + bf1[57] = bf0[57] + bf0[54]; + bf1[58] = bf0[58] + bf0[53]; + bf1[59] = bf0[59] + bf0[52]; + bf1[60] = bf0[60] + bf0[51]; + bf1[61] = bf0[61] + bf0[50]; + bf1[62] = bf0[62] + bf0[49]; + bf1[63] = bf0[63] + bf0[48]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 4 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = output; + bf1 = step; + bf1[0] = bf0[0] + bf0[7]; + bf1[1] = bf0[1] + bf0[6]; + bf1[2] = bf0[2] + bf0[5]; + bf1[3] = bf0[3] + bf0[4]; + bf1[4] = -bf0[4] + bf0[3]; + bf1[5] = -bf0[5] + bf0[2]; + bf1[6] = -bf0[6] + bf0[1]; + bf1[7] = -bf0[7] + bf0[0]; + bf1[8] = bf0[8]; + bf1[9] = bf0[9]; + bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]); + bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]); + bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit[stage]); + bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit[stage]); + bf1[14] = bf0[14]; + bf1[15] = bf0[15]; + bf1[16] = bf0[16] + bf0[23]; + bf1[17] = bf0[17] + bf0[22]; + bf1[18] = bf0[18] + bf0[21]; + bf1[19] = bf0[19] + bf0[20]; + bf1[20] = -bf0[20] + bf0[19]; + bf1[21] = -bf0[21] + bf0[18]; + bf1[22] = -bf0[22] + bf0[17]; + bf1[23] = -bf0[23] + bf0[16]; + bf1[24] = -bf0[24] + bf0[31]; + bf1[25] = -bf0[25] + bf0[30]; + bf1[26] = -bf0[26] + bf0[29]; + bf1[27] = -bf0[27] + bf0[28]; + bf1[28] = bf0[28] + bf0[27]; + bf1[29] = bf0[29] + bf0[26]; + bf1[30] = bf0[30] + bf0[25]; + bf1[31] = bf0[31] + bf0[24]; + bf1[32] = bf0[32]; + bf1[33] = bf0[33]; + bf1[34] = bf0[34]; + bf1[35] = bf0[35]; + bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit[stage]); + bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit[stage]); + bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit[stage]); + bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit[stage]); + bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit[stage]); + bf1[41] = 
half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit[stage]); + bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit[stage]); + bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit[stage]); + bf1[44] = bf0[44]; + bf1[45] = bf0[45]; + bf1[46] = bf0[46]; + bf1[47] = bf0[47]; + bf1[48] = bf0[48]; + bf1[49] = bf0[49]; + bf1[50] = bf0[50]; + bf1[51] = bf0[51]; + bf1[52] = half_btf(cospi[48], bf0[52], -cospi[16], bf0[43], cos_bit[stage]); + bf1[53] = half_btf(cospi[48], bf0[53], -cospi[16], bf0[42], cos_bit[stage]); + bf1[54] = half_btf(cospi[48], bf0[54], -cospi[16], bf0[41], cos_bit[stage]); + bf1[55] = half_btf(cospi[48], bf0[55], -cospi[16], bf0[40], cos_bit[stage]); + bf1[56] = half_btf(cospi[16], bf0[56], cospi[48], bf0[39], cos_bit[stage]); + bf1[57] = half_btf(cospi[16], bf0[57], cospi[48], bf0[38], cos_bit[stage]); + bf1[58] = half_btf(cospi[16], bf0[58], cospi[48], bf0[37], cos_bit[stage]); + bf1[59] = half_btf(cospi[16], bf0[59], cospi[48], bf0[36], cos_bit[stage]); + bf1[60] = bf0[60]; + bf1[61] = bf0[61]; + bf1[62] = bf0[62]; + bf1[63] = bf0[63]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 5 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = step; + bf1 = output; + bf1[0] = bf0[0] + bf0[3]; + bf1[1] = bf0[1] + bf0[2]; + bf1[2] = -bf0[2] + bf0[1]; + bf1[3] = -bf0[3] + bf0[0]; + bf1[4] = bf0[4]; + bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]); + bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit[stage]); + bf1[7] = bf0[7]; + bf1[8] = bf0[8] + bf0[11]; + bf1[9] = bf0[9] + bf0[10]; + bf1[10] = -bf0[10] + bf0[9]; + bf1[11] = -bf0[11] + bf0[8]; + bf1[12] = -bf0[12] + bf0[15]; + bf1[13] = -bf0[13] + bf0[14]; + bf1[14] = bf0[14] + bf0[13]; + bf1[15] = bf0[15] + bf0[12]; + bf1[16] = bf0[16]; + bf1[17] = bf0[17]; + bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit[stage]); + bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit[stage]); + bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit[stage]); + bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit[stage]); + bf1[22] = bf0[22]; + bf1[23] = bf0[23]; + bf1[24] = bf0[24]; + bf1[25] = bf0[25]; + bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit[stage]); + bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit[stage]); + bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit[stage]); + bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit[stage]); + bf1[30] = bf0[30]; + bf1[31] = bf0[31]; + bf1[32] = bf0[32] + bf0[39]; + bf1[33] = bf0[33] + bf0[38]; + bf1[34] = bf0[34] + bf0[37]; + bf1[35] = bf0[35] + bf0[36]; + bf1[36] = -bf0[36] + bf0[35]; + bf1[37] = -bf0[37] + bf0[34]; + bf1[38] = -bf0[38] + bf0[33]; + bf1[39] = -bf0[39] + bf0[32]; + bf1[40] = -bf0[40] + bf0[47]; + bf1[41] = -bf0[41] + bf0[46]; + bf1[42] = -bf0[42] + bf0[45]; + bf1[43] = -bf0[43] + bf0[44]; + bf1[44] = bf0[44] + bf0[43]; + bf1[45] = bf0[45] + bf0[42]; + bf1[46] = bf0[46] + bf0[41]; + bf1[47] = bf0[47] + bf0[40]; + bf1[48] = bf0[48] + bf0[55]; + bf1[49] = bf0[49] + bf0[54]; + bf1[50] = bf0[50] + bf0[53]; + bf1[51] = bf0[51] + bf0[52]; + bf1[52] = -bf0[52] + bf0[51]; + bf1[53] = -bf0[53] + bf0[50]; + bf1[54] = -bf0[54] + bf0[49]; + bf1[55] = -bf0[55] + bf0[48]; + bf1[56] = -bf0[56] + bf0[63]; + bf1[57] = -bf0[57] + bf0[62]; + bf1[58] = -bf0[58] + bf0[61]; + bf1[59] = -bf0[59] + bf0[60]; + bf1[60] = bf0[60] + bf0[59]; + bf1[61] = 
bf0[61] + bf0[58]; + bf1[62] = bf0[62] + bf0[57]; + bf1[63] = bf0[63] + bf0[56]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 6 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = output; + bf1 = step; + bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit[stage]); + bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit[stage]); + bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit[stage]); + bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit[stage]); + bf1[4] = bf0[4] + bf0[5]; + bf1[5] = -bf0[5] + bf0[4]; + bf1[6] = -bf0[6] + bf0[7]; + bf1[7] = bf0[7] + bf0[6]; + bf1[8] = bf0[8]; + bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit[stage]); + bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit[stage]); + bf1[11] = bf0[11]; + bf1[12] = bf0[12]; + bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit[stage]); + bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit[stage]); + bf1[15] = bf0[15]; + bf1[16] = bf0[16] + bf0[19]; + bf1[17] = bf0[17] + bf0[18]; + bf1[18] = -bf0[18] + bf0[17]; + bf1[19] = -bf0[19] + bf0[16]; + bf1[20] = -bf0[20] + bf0[23]; + bf1[21] = -bf0[21] + bf0[22]; + bf1[22] = bf0[22] + bf0[21]; + bf1[23] = bf0[23] + bf0[20]; + bf1[24] = bf0[24] + bf0[27]; + bf1[25] = bf0[25] + bf0[26]; + bf1[26] = -bf0[26] + bf0[25]; + bf1[27] = -bf0[27] + bf0[24]; + bf1[28] = -bf0[28] + bf0[31]; + bf1[29] = -bf0[29] + bf0[30]; + bf1[30] = bf0[30] + bf0[29]; + bf1[31] = bf0[31] + bf0[28]; + bf1[32] = bf0[32]; + bf1[33] = bf0[33]; + bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit[stage]); + bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit[stage]); + bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit[stage]); + bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit[stage]); + bf1[38] = bf0[38]; + bf1[39] = bf0[39]; + bf1[40] = bf0[40]; + bf1[41] = bf0[41]; + bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit[stage]); + bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit[stage]); + bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit[stage]); + bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit[stage]); + bf1[46] = bf0[46]; + bf1[47] = bf0[47]; + bf1[48] = bf0[48]; + bf1[49] = bf0[49]; + bf1[50] = half_btf(cospi[24], bf0[50], -cospi[40], bf0[45], cos_bit[stage]); + bf1[51] = half_btf(cospi[24], bf0[51], -cospi[40], bf0[44], cos_bit[stage]); + bf1[52] = half_btf(cospi[40], bf0[52], cospi[24], bf0[43], cos_bit[stage]); + bf1[53] = half_btf(cospi[40], bf0[53], cospi[24], bf0[42], cos_bit[stage]); + bf1[54] = bf0[54]; + bf1[55] = bf0[55]; + bf1[56] = bf0[56]; + bf1[57] = bf0[57]; + bf1[58] = half_btf(cospi[56], bf0[58], -cospi[8], bf0[37], cos_bit[stage]); + bf1[59] = half_btf(cospi[56], bf0[59], -cospi[8], bf0[36], cos_bit[stage]); + bf1[60] = half_btf(cospi[8], bf0[60], cospi[56], bf0[35], cos_bit[stage]); + bf1[61] = half_btf(cospi[8], bf0[61], cospi[56], bf0[34], cos_bit[stage]); + bf1[62] = bf0[62]; + bf1[63] = bf0[63]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 7 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = step; + bf1 = output; + bf1[0] = bf0[0]; + bf1[1] = bf0[1]; + bf1[2] = bf0[2]; + bf1[3] = bf0[3]; + bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit[stage]); + bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit[stage]); + 
bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit[stage]); + bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit[stage]); + bf1[8] = bf0[8] + bf0[9]; + bf1[9] = -bf0[9] + bf0[8]; + bf1[10] = -bf0[10] + bf0[11]; + bf1[11] = bf0[11] + bf0[10]; + bf1[12] = bf0[12] + bf0[13]; + bf1[13] = -bf0[13] + bf0[12]; + bf1[14] = -bf0[14] + bf0[15]; + bf1[15] = bf0[15] + bf0[14]; + bf1[16] = bf0[16]; + bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit[stage]); + bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit[stage]); + bf1[19] = bf0[19]; + bf1[20] = bf0[20]; + bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit[stage]); + bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit[stage]); + bf1[23] = bf0[23]; + bf1[24] = bf0[24]; + bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit[stage]); + bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit[stage]); + bf1[27] = bf0[27]; + bf1[28] = bf0[28]; + bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit[stage]); + bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit[stage]); + bf1[31] = bf0[31]; + bf1[32] = bf0[32] + bf0[35]; + bf1[33] = bf0[33] + bf0[34]; + bf1[34] = -bf0[34] + bf0[33]; + bf1[35] = -bf0[35] + bf0[32]; + bf1[36] = -bf0[36] + bf0[39]; + bf1[37] = -bf0[37] + bf0[38]; + bf1[38] = bf0[38] + bf0[37]; + bf1[39] = bf0[39] + bf0[36]; + bf1[40] = bf0[40] + bf0[43]; + bf1[41] = bf0[41] + bf0[42]; + bf1[42] = -bf0[42] + bf0[41]; + bf1[43] = -bf0[43] + bf0[40]; + bf1[44] = -bf0[44] + bf0[47]; + bf1[45] = -bf0[45] + bf0[46]; + bf1[46] = bf0[46] + bf0[45]; + bf1[47] = bf0[47] + bf0[44]; + bf1[48] = bf0[48] + bf0[51]; + bf1[49] = bf0[49] + bf0[50]; + bf1[50] = -bf0[50] + bf0[49]; + bf1[51] = -bf0[51] + bf0[48]; + bf1[52] = -bf0[52] + bf0[55]; + bf1[53] = -bf0[53] + bf0[54]; + bf1[54] = bf0[54] + bf0[53]; + bf1[55] = bf0[55] + bf0[52]; + bf1[56] = bf0[56] + bf0[59]; + bf1[57] = bf0[57] + bf0[58]; + bf1[58] = -bf0[58] + bf0[57]; + bf1[59] = -bf0[59] + bf0[56]; + bf1[60] = -bf0[60] + bf0[63]; + bf1[61] = -bf0[61] + bf0[62]; + bf1[62] = bf0[62] + bf0[61]; + bf1[63] = bf0[63] + bf0[60]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 8 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = output; + bf1 = step; + bf1[0] = bf0[0]; + bf1[1] = bf0[1]; + bf1[2] = bf0[2]; + bf1[3] = bf0[3]; + bf1[4] = bf0[4]; + bf1[5] = bf0[5]; + bf1[6] = bf0[6]; + bf1[7] = bf0[7]; + bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit[stage]); + bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit[stage]); + bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit[stage]); + bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit[stage]); + bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit[stage]); + bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit[stage]); + bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit[stage]); + bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit[stage]); + bf1[16] = bf0[16] + bf0[17]; + bf1[17] = -bf0[17] + bf0[16]; + bf1[18] = -bf0[18] + bf0[19]; + bf1[19] = bf0[19] + bf0[18]; + bf1[20] = bf0[20] + bf0[21]; + bf1[21] = -bf0[21] + bf0[20]; + bf1[22] = -bf0[22] + bf0[23]; + bf1[23] = bf0[23] + bf0[22]; + bf1[24] = bf0[24] + bf0[25]; + bf1[25] = -bf0[25] + bf0[24]; + bf1[26] = -bf0[26] + bf0[27]; + bf1[27] = bf0[27] + bf0[26]; + bf1[28] = bf0[28] + bf0[29]; + 
bf1[29] = -bf0[29] + bf0[28]; + bf1[30] = -bf0[30] + bf0[31]; + bf1[31] = bf0[31] + bf0[30]; + bf1[32] = bf0[32]; + bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit[stage]); + bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit[stage]); + bf1[35] = bf0[35]; + bf1[36] = bf0[36]; + bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit[stage]); + bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit[stage]); + bf1[39] = bf0[39]; + bf1[40] = bf0[40]; + bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit[stage]); + bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit[stage]); + bf1[43] = bf0[43]; + bf1[44] = bf0[44]; + bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit[stage]); + bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit[stage]); + bf1[47] = bf0[47]; + bf1[48] = bf0[48]; + bf1[49] = half_btf(cospi[12], bf0[49], -cospi[52], bf0[46], cos_bit[stage]); + bf1[50] = half_btf(cospi[52], bf0[50], cospi[12], bf0[45], cos_bit[stage]); + bf1[51] = bf0[51]; + bf1[52] = bf0[52]; + bf1[53] = half_btf(cospi[44], bf0[53], -cospi[20], bf0[42], cos_bit[stage]); + bf1[54] = half_btf(cospi[20], bf0[54], cospi[44], bf0[41], cos_bit[stage]); + bf1[55] = bf0[55]; + bf1[56] = bf0[56]; + bf1[57] = half_btf(cospi[28], bf0[57], -cospi[36], bf0[38], cos_bit[stage]); + bf1[58] = half_btf(cospi[36], bf0[58], cospi[28], bf0[37], cos_bit[stage]); + bf1[59] = bf0[59]; + bf1[60] = bf0[60]; + bf1[61] = half_btf(cospi[60], bf0[61], -cospi[4], bf0[34], cos_bit[stage]); + bf1[62] = half_btf(cospi[4], bf0[62], cospi[60], bf0[33], cos_bit[stage]); + bf1[63] = bf0[63]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 9 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = step; + bf1 = output; + bf1[0] = bf0[0]; + bf1[1] = bf0[1]; + bf1[2] = bf0[2]; + bf1[3] = bf0[3]; + bf1[4] = bf0[4]; + bf1[5] = bf0[5]; + bf1[6] = bf0[6]; + bf1[7] = bf0[7]; + bf1[8] = bf0[8]; + bf1[9] = bf0[9]; + bf1[10] = bf0[10]; + bf1[11] = bf0[11]; + bf1[12] = bf0[12]; + bf1[13] = bf0[13]; + bf1[14] = bf0[14]; + bf1[15] = bf0[15]; + bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit[stage]); + bf1[17] = half_btf(cospi[30], bf0[17], cospi[34], bf0[30], cos_bit[stage]); + bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit[stage]); + bf1[19] = half_btf(cospi[14], bf0[19], cospi[50], bf0[28], cos_bit[stage]); + bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit[stage]); + bf1[21] = half_btf(cospi[22], bf0[21], cospi[42], bf0[26], cos_bit[stage]); + bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit[stage]); + bf1[23] = half_btf(cospi[6], bf0[23], cospi[58], bf0[24], cos_bit[stage]); + bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit[stage]); + bf1[25] = half_btf(cospi[38], bf0[25], -cospi[26], bf0[22], cos_bit[stage]); + bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit[stage]); + bf1[27] = half_btf(cospi[54], bf0[27], -cospi[10], bf0[20], cos_bit[stage]); + bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit[stage]); + bf1[29] = half_btf(cospi[46], bf0[29], -cospi[18], bf0[18], cos_bit[stage]); + bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit[stage]); + bf1[31] = half_btf(cospi[62], bf0[31], -cospi[2], bf0[16], cos_bit[stage]); + bf1[32] = bf0[32] + bf0[33]; + bf1[33] = -bf0[33] + bf0[32]; + bf1[34] = -bf0[34] + bf0[35]; + bf1[35] = bf0[35] + bf0[34]; + 
bf1[36] = bf0[36] + bf0[37]; + bf1[37] = -bf0[37] + bf0[36]; + bf1[38] = -bf0[38] + bf0[39]; + bf1[39] = bf0[39] + bf0[38]; + bf1[40] = bf0[40] + bf0[41]; + bf1[41] = -bf0[41] + bf0[40]; + bf1[42] = -bf0[42] + bf0[43]; + bf1[43] = bf0[43] + bf0[42]; + bf1[44] = bf0[44] + bf0[45]; + bf1[45] = -bf0[45] + bf0[44]; + bf1[46] = -bf0[46] + bf0[47]; + bf1[47] = bf0[47] + bf0[46]; + bf1[48] = bf0[48] + bf0[49]; + bf1[49] = -bf0[49] + bf0[48]; + bf1[50] = -bf0[50] + bf0[51]; + bf1[51] = bf0[51] + bf0[50]; + bf1[52] = bf0[52] + bf0[53]; + bf1[53] = -bf0[53] + bf0[52]; + bf1[54] = -bf0[54] + bf0[55]; + bf1[55] = bf0[55] + bf0[54]; + bf1[56] = bf0[56] + bf0[57]; + bf1[57] = -bf0[57] + bf0[56]; + bf1[58] = -bf0[58] + bf0[59]; + bf1[59] = bf0[59] + bf0[58]; + bf1[60] = bf0[60] + bf0[61]; + bf1[61] = -bf0[61] + bf0[60]; + bf1[62] = -bf0[62] + bf0[63]; + bf1[63] = bf0[63] + bf0[62]; + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 10 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = output; + bf1 = step; + bf1[0] = bf0[0]; + bf1[1] = bf0[1]; + bf1[2] = bf0[2]; + bf1[3] = bf0[3]; + bf1[4] = bf0[4]; + bf1[5] = bf0[5]; + bf1[6] = bf0[6]; + bf1[7] = bf0[7]; + bf1[8] = bf0[8]; + bf1[9] = bf0[9]; + bf1[10] = bf0[10]; + bf1[11] = bf0[11]; + bf1[12] = bf0[12]; + bf1[13] = bf0[13]; + bf1[14] = bf0[14]; + bf1[15] = bf0[15]; + bf1[16] = bf0[16]; + bf1[17] = bf0[17]; + bf1[18] = bf0[18]; + bf1[19] = bf0[19]; + bf1[20] = bf0[20]; + bf1[21] = bf0[21]; + bf1[22] = bf0[22]; + bf1[23] = bf0[23]; + bf1[24] = bf0[24]; + bf1[25] = bf0[25]; + bf1[26] = bf0[26]; + bf1[27] = bf0[27]; + bf1[28] = bf0[28]; + bf1[29] = bf0[29]; + bf1[30] = bf0[30]; + bf1[31] = bf0[31]; + bf1[32] = half_btf(cospi[63], bf0[32], cospi[1], bf0[63], cos_bit[stage]); + bf1[33] = half_btf(cospi[31], bf0[33], cospi[33], bf0[62], cos_bit[stage]); + bf1[34] = half_btf(cospi[47], bf0[34], cospi[17], bf0[61], cos_bit[stage]); + bf1[35] = half_btf(cospi[15], bf0[35], cospi[49], bf0[60], cos_bit[stage]); + bf1[36] = half_btf(cospi[55], bf0[36], cospi[9], bf0[59], cos_bit[stage]); + bf1[37] = half_btf(cospi[23], bf0[37], cospi[41], bf0[58], cos_bit[stage]); + bf1[38] = half_btf(cospi[39], bf0[38], cospi[25], bf0[57], cos_bit[stage]); + bf1[39] = half_btf(cospi[7], bf0[39], cospi[57], bf0[56], cos_bit[stage]); + bf1[40] = half_btf(cospi[59], bf0[40], cospi[5], bf0[55], cos_bit[stage]); + bf1[41] = half_btf(cospi[27], bf0[41], cospi[37], bf0[54], cos_bit[stage]); + bf1[42] = half_btf(cospi[43], bf0[42], cospi[21], bf0[53], cos_bit[stage]); + bf1[43] = half_btf(cospi[11], bf0[43], cospi[53], bf0[52], cos_bit[stage]); + bf1[44] = half_btf(cospi[51], bf0[44], cospi[13], bf0[51], cos_bit[stage]); + bf1[45] = half_btf(cospi[19], bf0[45], cospi[45], bf0[50], cos_bit[stage]); + bf1[46] = half_btf(cospi[35], bf0[46], cospi[29], bf0[49], cos_bit[stage]); + bf1[47] = half_btf(cospi[3], bf0[47], cospi[61], bf0[48], cos_bit[stage]); + bf1[48] = half_btf(cospi[3], bf0[48], -cospi[61], bf0[47], cos_bit[stage]); + bf1[49] = half_btf(cospi[35], bf0[49], -cospi[29], bf0[46], cos_bit[stage]); + bf1[50] = half_btf(cospi[19], bf0[50], -cospi[45], bf0[45], cos_bit[stage]); + bf1[51] = half_btf(cospi[51], bf0[51], -cospi[13], bf0[44], cos_bit[stage]); + bf1[52] = half_btf(cospi[11], bf0[52], -cospi[53], bf0[43], cos_bit[stage]); + bf1[53] = half_btf(cospi[43], bf0[53], -cospi[21], bf0[42], cos_bit[stage]); + bf1[54] = half_btf(cospi[27], bf0[54], -cospi[37], bf0[41], cos_bit[stage]); + bf1[55] = half_btf(cospi[59], bf0[55], -cospi[5], 
bf0[40], cos_bit[stage]); + bf1[56] = half_btf(cospi[7], bf0[56], -cospi[57], bf0[39], cos_bit[stage]); + bf1[57] = half_btf(cospi[39], bf0[57], -cospi[25], bf0[38], cos_bit[stage]); + bf1[58] = half_btf(cospi[23], bf0[58], -cospi[41], bf0[37], cos_bit[stage]); + bf1[59] = half_btf(cospi[55], bf0[59], -cospi[9], bf0[36], cos_bit[stage]); + bf1[60] = half_btf(cospi[15], bf0[60], -cospi[49], bf0[35], cos_bit[stage]); + bf1[61] = half_btf(cospi[47], bf0[61], -cospi[17], bf0[34], cos_bit[stage]); + bf1[62] = half_btf(cospi[31], bf0[62], -cospi[33], bf0[33], cos_bit[stage]); + bf1[63] = half_btf(cospi[63], bf0[63], -cospi[1], bf0[32], cos_bit[stage]); + range_check(stage, input, bf1, size, stage_range[stage]); + + // stage 11 + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf0 = step; + bf1 = output; + bf1[0] = bf0[0]; + bf1[1] = bf0[32]; + bf1[2] = bf0[16]; + bf1[3] = bf0[48]; + bf1[4] = bf0[8]; + bf1[5] = bf0[40]; + bf1[6] = bf0[24]; + bf1[7] = bf0[56]; + bf1[8] = bf0[4]; + bf1[9] = bf0[36]; + bf1[10] = bf0[20]; + bf1[11] = bf0[52]; + bf1[12] = bf0[12]; + bf1[13] = bf0[44]; + bf1[14] = bf0[28]; + bf1[15] = bf0[60]; + bf1[16] = bf0[2]; + bf1[17] = bf0[34]; + bf1[18] = bf0[18]; + bf1[19] = bf0[50]; + bf1[20] = bf0[10]; + bf1[21] = bf0[42]; + bf1[22] = bf0[26]; + bf1[23] = bf0[58]; + bf1[24] = bf0[6]; + bf1[25] = bf0[38]; + bf1[26] = bf0[22]; + bf1[27] = bf0[54]; + bf1[28] = bf0[14]; + bf1[29] = bf0[46]; + bf1[30] = bf0[30]; + bf1[31] = bf0[62]; + bf1[32] = bf0[1]; + bf1[33] = bf0[33]; + bf1[34] = bf0[17]; + bf1[35] = bf0[49]; + bf1[36] = bf0[9]; + bf1[37] = bf0[41]; + bf1[38] = bf0[25]; + bf1[39] = bf0[57]; + bf1[40] = bf0[5]; + bf1[41] = bf0[37]; + bf1[42] = bf0[21]; + bf1[43] = bf0[53]; + bf1[44] = bf0[13]; + bf1[45] = bf0[45]; + bf1[46] = bf0[29]; + bf1[47] = bf0[61]; + bf1[48] = bf0[3]; + bf1[49] = bf0[35]; + bf1[50] = bf0[19]; + bf1[51] = bf0[51]; + bf1[52] = bf0[11]; + bf1[53] = bf0[43]; + bf1[54] = bf0[27]; + bf1[55] = bf0[59]; + bf1[56] = bf0[7]; + bf1[57] = bf0[39]; + bf1[58] = bf0[23]; + bf1[59] = bf0[55]; + bf1[60] = bf0[15]; + bf1[61] = bf0[47]; + bf1[62] = bf0[31]; + bf1[63] = bf0[63]; + range_check(stage, input, bf1, size, stage_range[stage]); +} diff --git a/vp10/common/vp10_fwd_txfm1d.h b/vp10/common/vp10_fwd_txfm1d.h index d5b9f4014561d2c6534d96e3a66f285aad1968cf..d06e305ac9c495f67c3e6d1d5478b4016acd0023 100644 --- a/vp10/common/vp10_fwd_txfm1d.h +++ b/vp10/common/vp10_fwd_txfm1d.h @@ -25,6 +25,8 @@ void vp10_fdct16_new(const int32_t *input, int32_t *output, const int8_t *cos_bit, const int8_t *stage_range); void vp10_fdct32_new(const int32_t *input, int32_t *output, const int8_t *cos_bit, const int8_t *stage_range); +void vp10_fdct64_new(const int32_t *input, int32_t *output, + const int8_t *cos_bit, const int8_t *stage_range); void vp10_fadst4_new(const int32_t *input, int32_t *output, const int8_t *cos_bit, const int8_t *stage_range); diff --git a/vp10/common/vp10_fwd_txfm2d.c b/vp10/common/vp10_fwd_txfm2d.c index 67449ec05790f992224cac3c74a8f454662b2059..045ca2bfab2050a6e4166c89b13f0fa51c9e5312 100644 --- a/vp10/common/vp10_fwd_txfm2d.c +++ b/vp10/common/vp10_fwd_txfm2d.c @@ -82,3 +82,11 @@ void vp10_fwd_txfm2d_32x32(const int16_t *input, int32_t *output, (void)bd; fwd_txfm2d_c(input, output, stride, cfg, txfm_buf); } + +void vp10_fwd_txfm2d_64x64(const int16_t *input, int32_t *output, + const int stride, const TXFM_2D_CFG *cfg, + const int bd) { + int txfm_buf[64 * 64 + 64 + 64]; + (void)bd; + fwd_txfm2d_c(input, output, stride, cfg, txfm_buf); +} diff 
--git a/vp10/common/vp10_fwd_txfm2d.h b/vp10/common/vp10_fwd_txfm2d.h index 64e6f5607e273cd590358bda5e21ee750aecae69..3829609d616ac00fdf25cd6687e59bcef4bcc8d0 100644 --- a/vp10/common/vp10_fwd_txfm2d.h +++ b/vp10/common/vp10_fwd_txfm2d.h @@ -27,6 +27,9 @@ void vp10_fwd_txfm2d_16x16(const int16_t *input, int32_t *output, void vp10_fwd_txfm2d_32x32(const int16_t *input, int32_t *output, const int stride, const TXFM_2D_CFG *cfg, const int bd); +void vp10_fwd_txfm2d_64x64(const int16_t *input, int32_t *output, + const int stride, const TXFM_2D_CFG *cfg, + const int bd); #ifdef __cplusplus } #endif diff --git a/vp10/common/vp10_fwd_txfm2d_cfg.h b/vp10/common/vp10_fwd_txfm2d_cfg.h index 5c2b4ca1029b8ecf65bcf79fc53a0081f95dccb7..bbabafe456ddd8077c630a1c08f6ba3ed0c6d0f1 100644 --- a/vp10/common/vp10_fwd_txfm2d_cfg.h +++ b/vp10/common/vp10_fwd_txfm2d_cfg.h @@ -95,6 +95,29 @@ static const TXFM_2D_CFG fwd_txfm_2d_cfg_dct_dct_32 = { vp10_fdct32_new, // .txfm_func_col vp10_fdct32_new}; // .txfm_func_row; +// ---------------- config fwd_dct_dct_64 ---------------- +static int8_t fwd_shift_dct_dct_64[3] = {2, -2, -2}; +static int8_t fwd_stage_range_col_dct_dct_64[12] = {13, 14, 15, 16, 17, 18, + 19, 19, 19, 19, 19, 19}; +static int8_t fwd_stage_range_row_dct_dct_64[12] = {17, 18, 19, 20, 21, 22, + 22, 22, 22, 22, 22, 22}; +static int8_t fwd_cos_bit_col_dct_dct_64[12] = {15, 15, 15, 15, 15, 14, + 13, 13, 13, 13, 13, 13}; +static int8_t fwd_cos_bit_row_dct_dct_64[12] = {15, 14, 13, 12, 11, 10, + 10, 10, 10, 10, 10, 10}; + +static const TXFM_2D_CFG fwd_txfm_2d_cfg_dct_dct_64 = { + 64, // .txfm_size + 12, // .stage_num_col + 12, // .stage_num_row + fwd_shift_dct_dct_64, // .shift + fwd_stage_range_col_dct_dct_64, // .stage_range_col + fwd_stage_range_row_dct_dct_64, // .stage_range_row + fwd_cos_bit_col_dct_dct_64, // .cos_bit_col + fwd_cos_bit_row_dct_dct_64, // .cos_bit_row + vp10_fdct64_new, // .txfm_func_col + vp10_fdct64_new}; // .txfm_func_row; + // ---------------- config fwd_dct_adst_4 ---------------- static const int8_t fwd_shift_dct_adst_4[3] = {5, -2, -1}; static const int8_t fwd_stage_range_col_dct_adst_4[4] = {16, 17, 18, 18}; diff --git a/vp10/common/vp10_inv_txfm1d.c b/vp10/common/vp10_inv_txfm1d.c index 606ca559fc787375553a0c147519a438b47008de..494000f7f12fb078f618ce71be71434f0eaf6baa 100644 --- a/vp10/common/vp10_inv_txfm1d.c +++ b/vp10/common/vp10_inv_txfm1d.c @@ -32,11 +32,11 @@ #else #define range_check(stage, input, buf, size, bit) \ { \ - (void) stage; \ - (void) input; \ - (void) buf; \ - (void) size; \ - (void) bit; \ + (void)stage; \ + (void)input; \ + (void)buf; \ + (void)size; \ + (void)bit; \ } #endif @@ -1535,3 +1535,796 @@ void vp10_iadst32_new(const int32_t *input, int32_t *output, bf1[31] = bf0[0]; range_check(stage, input, bf1, size, stage_range[stage]); } + +void vp10_idct64_new(const int32_t *input, int32_t *output, + const int8_t *cos_bit, const int8_t *stage_range) { + const int32_t size = 64; + const int32_t *cospi; + + int32_t stage = 0; + int32_t *bf0, *bf1; + int32_t step[64]; + + // stage 0; + range_check(stage, input, input, size, stage_range[stage]); + + // stage 1; + stage++; + cospi = cospi_arr[cos_bit[stage] - cos_bit_min]; + bf1 = output; + bf1[0] = input[0]; + bf1[1] = input[32]; + bf1[2] = input[16]; + bf1[3] = input[48]; + bf1[4] = input[8]; + bf1[5] = input[40]; + bf1[6] = input[24]; + bf1[7] = input[56]; + bf1[8] = input[4]; + bf1[9] = input[36]; + bf1[10] = input[20]; + bf1[11] = input[52]; + bf1[12] = input[12]; + bf1[13] = input[44]; + bf1[14] = 
input[28];
+  bf1[15] = input[60];
+  bf1[16] = input[2];
+  bf1[17] = input[34];
+  bf1[18] = input[18];
+  bf1[19] = input[50];
+  bf1[20] = input[10];
+  bf1[21] = input[42];
+  bf1[22] = input[26];
+  bf1[23] = input[58];
+  bf1[24] = input[6];
+  bf1[25] = input[38];
+  bf1[26] = input[22];
+  bf1[27] = input[54];
+  bf1[28] = input[14];
+  bf1[29] = input[46];
+  bf1[30] = input[30];
+  bf1[31] = input[62];
+  bf1[32] = input[1];
+  bf1[33] = input[33];
+  bf1[34] = input[17];
+  bf1[35] = input[49];
+  bf1[36] = input[9];
+  bf1[37] = input[41];
+  bf1[38] = input[25];
+  bf1[39] = input[57];
+  bf1[40] = input[5];
+  bf1[41] = input[37];
+  bf1[42] = input[21];
+  bf1[43] = input[53];
+  bf1[44] = input[13];
+  bf1[45] = input[45];
+  bf1[46] = input[29];
+  bf1[47] = input[61];
+  bf1[48] = input[3];
+  bf1[49] = input[35];
+  bf1[50] = input[19];
+  bf1[51] = input[51];
+  bf1[52] = input[11];
+  bf1[53] = input[43];
+  bf1[54] = input[27];
+  bf1[55] = input[59];
+  bf1[56] = input[7];
+  bf1[57] = input[39];
+  bf1[58] = input[23];
+  bf1[59] = input[55];
+  bf1[60] = input[15];
+  bf1[61] = input[47];
+  bf1[62] = input[31];
+  bf1[63] = input[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 2
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = output;
+  bf1 = step;
+  bf1[0] = bf0[0];
+  bf1[1] = bf0[1];
+  bf1[2] = bf0[2];
+  bf1[3] = bf0[3];
+  bf1[4] = bf0[4];
+  bf1[5] = bf0[5];
+  bf1[6] = bf0[6];
+  bf1[7] = bf0[7];
+  bf1[8] = bf0[8];
+  bf1[9] = bf0[9];
+  bf1[10] = bf0[10];
+  bf1[11] = bf0[11];
+  bf1[12] = bf0[12];
+  bf1[13] = bf0[13];
+  bf1[14] = bf0[14];
+  bf1[15] = bf0[15];
+  bf1[16] = bf0[16];
+  bf1[17] = bf0[17];
+  bf1[18] = bf0[18];
+  bf1[19] = bf0[19];
+  bf1[20] = bf0[20];
+  bf1[21] = bf0[21];
+  bf1[22] = bf0[22];
+  bf1[23] = bf0[23];
+  bf1[24] = bf0[24];
+  bf1[25] = bf0[25];
+  bf1[26] = bf0[26];
+  bf1[27] = bf0[27];
+  bf1[28] = bf0[28];
+  bf1[29] = bf0[29];
+  bf1[30] = bf0[30];
+  bf1[31] = bf0[31];
+  bf1[32] = half_btf(cospi[63], bf0[32], -cospi[1], bf0[63], cos_bit[stage]);
+  bf1[33] = half_btf(cospi[31], bf0[33], -cospi[33], bf0[62], cos_bit[stage]);
+  bf1[34] = half_btf(cospi[47], bf0[34], -cospi[17], bf0[61], cos_bit[stage]);
+  bf1[35] = half_btf(cospi[15], bf0[35], -cospi[49], bf0[60], cos_bit[stage]);
+  bf1[36] = half_btf(cospi[55], bf0[36], -cospi[9], bf0[59], cos_bit[stage]);
+  bf1[37] = half_btf(cospi[23], bf0[37], -cospi[41], bf0[58], cos_bit[stage]);
+  bf1[38] = half_btf(cospi[39], bf0[38], -cospi[25], bf0[57], cos_bit[stage]);
+  bf1[39] = half_btf(cospi[7], bf0[39], -cospi[57], bf0[56], cos_bit[stage]);
+  bf1[40] = half_btf(cospi[59], bf0[40], -cospi[5], bf0[55], cos_bit[stage]);
+  bf1[41] = half_btf(cospi[27], bf0[41], -cospi[37], bf0[54], cos_bit[stage]);
+  bf1[42] = half_btf(cospi[43], bf0[42], -cospi[21], bf0[53], cos_bit[stage]);
+  bf1[43] = half_btf(cospi[11], bf0[43], -cospi[53], bf0[52], cos_bit[stage]);
+  bf1[44] = half_btf(cospi[51], bf0[44], -cospi[13], bf0[51], cos_bit[stage]);
+  bf1[45] = half_btf(cospi[19], bf0[45], -cospi[45], bf0[50], cos_bit[stage]);
+  bf1[46] = half_btf(cospi[35], bf0[46], -cospi[29], bf0[49], cos_bit[stage]);
+  bf1[47] = half_btf(cospi[3], bf0[47], -cospi[61], bf0[48], cos_bit[stage]);
+  bf1[48] = half_btf(cospi[61], bf0[47], cospi[3], bf0[48], cos_bit[stage]);
+  bf1[49] = half_btf(cospi[29], bf0[46], cospi[35], bf0[49], cos_bit[stage]);
+  bf1[50] = half_btf(cospi[45], bf0[45], cospi[19], bf0[50], cos_bit[stage]);
+  bf1[51] = half_btf(cospi[13], bf0[44], cospi[51], bf0[51], cos_bit[stage]);
+  bf1[52] = half_btf(cospi[53], bf0[43], cospi[11], bf0[52], cos_bit[stage]);
+  bf1[53] = half_btf(cospi[21], bf0[42], cospi[43], bf0[53], cos_bit[stage]);
+  bf1[54] = half_btf(cospi[37], bf0[41], cospi[27], bf0[54], cos_bit[stage]);
+  bf1[55] = half_btf(cospi[5], bf0[40], cospi[59], bf0[55], cos_bit[stage]);
+  bf1[56] = half_btf(cospi[57], bf0[39], cospi[7], bf0[56], cos_bit[stage]);
+  bf1[57] = half_btf(cospi[25], bf0[38], cospi[39], bf0[57], cos_bit[stage]);
+  bf1[58] = half_btf(cospi[41], bf0[37], cospi[23], bf0[58], cos_bit[stage]);
+  bf1[59] = half_btf(cospi[9], bf0[36], cospi[55], bf0[59], cos_bit[stage]);
+  bf1[60] = half_btf(cospi[49], bf0[35], cospi[15], bf0[60], cos_bit[stage]);
+  bf1[61] = half_btf(cospi[17], bf0[34], cospi[47], bf0[61], cos_bit[stage]);
+  bf1[62] = half_btf(cospi[33], bf0[33], cospi[31], bf0[62], cos_bit[stage]);
+  bf1[63] = half_btf(cospi[1], bf0[32], cospi[63], bf0[63], cos_bit[stage]);
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 3
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = step;
+  bf1 = output;
+  bf1[0] = bf0[0];
+  bf1[1] = bf0[1];
+  bf1[2] = bf0[2];
+  bf1[3] = bf0[3];
+  bf1[4] = bf0[4];
+  bf1[5] = bf0[5];
+  bf1[6] = bf0[6];
+  bf1[7] = bf0[7];
+  bf1[8] = bf0[8];
+  bf1[9] = bf0[9];
+  bf1[10] = bf0[10];
+  bf1[11] = bf0[11];
+  bf1[12] = bf0[12];
+  bf1[13] = bf0[13];
+  bf1[14] = bf0[14];
+  bf1[15] = bf0[15];
+  bf1[16] = half_btf(cospi[62], bf0[16], -cospi[2], bf0[31], cos_bit[stage]);
+  bf1[17] = half_btf(cospi[30], bf0[17], -cospi[34], bf0[30], cos_bit[stage]);
+  bf1[18] = half_btf(cospi[46], bf0[18], -cospi[18], bf0[29], cos_bit[stage]);
+  bf1[19] = half_btf(cospi[14], bf0[19], -cospi[50], bf0[28], cos_bit[stage]);
+  bf1[20] = half_btf(cospi[54], bf0[20], -cospi[10], bf0[27], cos_bit[stage]);
+  bf1[21] = half_btf(cospi[22], bf0[21], -cospi[42], bf0[26], cos_bit[stage]);
+  bf1[22] = half_btf(cospi[38], bf0[22], -cospi[26], bf0[25], cos_bit[stage]);
+  bf1[23] = half_btf(cospi[6], bf0[23], -cospi[58], bf0[24], cos_bit[stage]);
+  bf1[24] = half_btf(cospi[58], bf0[23], cospi[6], bf0[24], cos_bit[stage]);
+  bf1[25] = half_btf(cospi[26], bf0[22], cospi[38], bf0[25], cos_bit[stage]);
+  bf1[26] = half_btf(cospi[42], bf0[21], cospi[22], bf0[26], cos_bit[stage]);
+  bf1[27] = half_btf(cospi[10], bf0[20], cospi[54], bf0[27], cos_bit[stage]);
+  bf1[28] = half_btf(cospi[50], bf0[19], cospi[14], bf0[28], cos_bit[stage]);
+  bf1[29] = half_btf(cospi[18], bf0[18], cospi[46], bf0[29], cos_bit[stage]);
+  bf1[30] = half_btf(cospi[34], bf0[17], cospi[30], bf0[30], cos_bit[stage]);
+  bf1[31] = half_btf(cospi[2], bf0[16], cospi[62], bf0[31], cos_bit[stage]);
+  bf1[32] = bf0[32] + bf0[33];
+  bf1[33] = bf0[32] - bf0[33];
+  bf1[34] = -bf0[34] + bf0[35];
+  bf1[35] = bf0[34] + bf0[35];
+  bf1[36] = bf0[36] + bf0[37];
+  bf1[37] = bf0[36] - bf0[37];
+  bf1[38] = -bf0[38] + bf0[39];
+  bf1[39] = bf0[38] + bf0[39];
+  bf1[40] = bf0[40] + bf0[41];
+  bf1[41] = bf0[40] - bf0[41];
+  bf1[42] = -bf0[42] + bf0[43];
+  bf1[43] = bf0[42] + bf0[43];
+  bf1[44] = bf0[44] + bf0[45];
+  bf1[45] = bf0[44] - bf0[45];
+  bf1[46] = -bf0[46] + bf0[47];
+  bf1[47] = bf0[46] + bf0[47];
+  bf1[48] = bf0[48] + bf0[49];
+  bf1[49] = bf0[48] - bf0[49];
+  bf1[50] = -bf0[50] + bf0[51];
+  bf1[51] = bf0[50] + bf0[51];
+  bf1[52] = bf0[52] + bf0[53];
+  bf1[53] = bf0[52] - bf0[53];
+  bf1[54] = -bf0[54] + bf0[55];
+  bf1[55] = bf0[54] + bf0[55];
+  bf1[56] = bf0[56] + bf0[57];
+  bf1[57] = bf0[56] - bf0[57];
+  bf1[58] = -bf0[58] + bf0[59];
+  bf1[59] = bf0[58] + bf0[59];
+  bf1[60] = bf0[60] + bf0[61];
+  bf1[61] = bf0[60] - bf0[61];
+  bf1[62] = -bf0[62] + bf0[63];
+  bf1[63] = bf0[62] + bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 4
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = output;
+  bf1 = step;
+  bf1[0] = bf0[0];
+  bf1[1] = bf0[1];
+  bf1[2] = bf0[2];
+  bf1[3] = bf0[3];
+  bf1[4] = bf0[4];
+  bf1[5] = bf0[5];
+  bf1[6] = bf0[6];
+  bf1[7] = bf0[7];
+  bf1[8] = half_btf(cospi[60], bf0[8], -cospi[4], bf0[15], cos_bit[stage]);
+  bf1[9] = half_btf(cospi[28], bf0[9], -cospi[36], bf0[14], cos_bit[stage]);
+  bf1[10] = half_btf(cospi[44], bf0[10], -cospi[20], bf0[13], cos_bit[stage]);
+  bf1[11] = half_btf(cospi[12], bf0[11], -cospi[52], bf0[12], cos_bit[stage]);
+  bf1[12] = half_btf(cospi[52], bf0[11], cospi[12], bf0[12], cos_bit[stage]);
+  bf1[13] = half_btf(cospi[20], bf0[10], cospi[44], bf0[13], cos_bit[stage]);
+  bf1[14] = half_btf(cospi[36], bf0[9], cospi[28], bf0[14], cos_bit[stage]);
+  bf1[15] = half_btf(cospi[4], bf0[8], cospi[60], bf0[15], cos_bit[stage]);
+  bf1[16] = bf0[16] + bf0[17];
+  bf1[17] = bf0[16] - bf0[17];
+  bf1[18] = -bf0[18] + bf0[19];
+  bf1[19] = bf0[18] + bf0[19];
+  bf1[20] = bf0[20] + bf0[21];
+  bf1[21] = bf0[20] - bf0[21];
+  bf1[22] = -bf0[22] + bf0[23];
+  bf1[23] = bf0[22] + bf0[23];
+  bf1[24] = bf0[24] + bf0[25];
+  bf1[25] = bf0[24] - bf0[25];
+  bf1[26] = -bf0[26] + bf0[27];
+  bf1[27] = bf0[26] + bf0[27];
+  bf1[28] = bf0[28] + bf0[29];
+  bf1[29] = bf0[28] - bf0[29];
+  bf1[30] = -bf0[30] + bf0[31];
+  bf1[31] = bf0[30] + bf0[31];
+  bf1[32] = bf0[32];
+  bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit[stage]);
+  bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit[stage]);
+  bf1[35] = bf0[35];
+  bf1[36] = bf0[36];
+  bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit[stage]);
+  bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit[stage]);
+  bf1[39] = bf0[39];
+  bf1[40] = bf0[40];
+  bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit[stage]);
+  bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit[stage]);
+  bf1[43] = bf0[43];
+  bf1[44] = bf0[44];
+  bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit[stage]);
+  bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit[stage]);
+  bf1[47] = bf0[47];
+  bf1[48] = bf0[48];
+  bf1[49] = half_btf(-cospi[52], bf0[46], cospi[12], bf0[49], cos_bit[stage]);
+  bf1[50] = half_btf(cospi[12], bf0[45], cospi[52], bf0[50], cos_bit[stage]);
+  bf1[51] = bf0[51];
+  bf1[52] = bf0[52];
+  bf1[53] = half_btf(-cospi[20], bf0[42], cospi[44], bf0[53], cos_bit[stage]);
+  bf1[54] = half_btf(cospi[44], bf0[41], cospi[20], bf0[54], cos_bit[stage]);
+  bf1[55] = bf0[55];
+  bf1[56] = bf0[56];
+  bf1[57] = half_btf(-cospi[36], bf0[38], cospi[28], bf0[57], cos_bit[stage]);
+  bf1[58] = half_btf(cospi[28], bf0[37], cospi[36], bf0[58], cos_bit[stage]);
+  bf1[59] = bf0[59];
+  bf1[60] = bf0[60];
+  bf1[61] = half_btf(-cospi[4], bf0[34], cospi[60], bf0[61], cos_bit[stage]);
+  bf1[62] = half_btf(cospi[60], bf0[33], cospi[4], bf0[62], cos_bit[stage]);
+  bf1[63] = bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 5
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = step;
+  bf1 = output;
+  bf1[0] = bf0[0];
+  bf1[1] = bf0[1];
+  bf1[2] = bf0[2];
+  bf1[3] = bf0[3];
+  bf1[4] = half_btf(cospi[56], bf0[4], -cospi[8], bf0[7], cos_bit[stage]);
+  bf1[5] = half_btf(cospi[24], bf0[5], -cospi[40], bf0[6], cos_bit[stage]);
+  bf1[6] = half_btf(cospi[40], bf0[5], cospi[24], bf0[6], cos_bit[stage]);
+  bf1[7] = half_btf(cospi[8], bf0[4], cospi[56], bf0[7], cos_bit[stage]);
+  bf1[8] = bf0[8] + bf0[9];
+  bf1[9] = bf0[8] - bf0[9];
+  bf1[10] = -bf0[10] + bf0[11];
+  bf1[11] = bf0[10] + bf0[11];
+  bf1[12] = bf0[12] + bf0[13];
+  bf1[13] = bf0[12] - bf0[13];
+  bf1[14] = -bf0[14] + bf0[15];
+  bf1[15] = bf0[14] + bf0[15];
+  bf1[16] = bf0[16];
+  bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit[stage]);
+  bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit[stage]);
+  bf1[19] = bf0[19];
+  bf1[20] = bf0[20];
+  bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit[stage]);
+  bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit[stage]);
+  bf1[23] = bf0[23];
+  bf1[24] = bf0[24];
+  bf1[25] = half_btf(-cospi[40], bf0[22], cospi[24], bf0[25], cos_bit[stage]);
+  bf1[26] = half_btf(cospi[24], bf0[21], cospi[40], bf0[26], cos_bit[stage]);
+  bf1[27] = bf0[27];
+  bf1[28] = bf0[28];
+  bf1[29] = half_btf(-cospi[8], bf0[18], cospi[56], bf0[29], cos_bit[stage]);
+  bf1[30] = half_btf(cospi[56], bf0[17], cospi[8], bf0[30], cos_bit[stage]);
+  bf1[31] = bf0[31];
+  bf1[32] = bf0[32] + bf0[35];
+  bf1[33] = bf0[33] + bf0[34];
+  bf1[34] = bf0[33] - bf0[34];
+  bf1[35] = bf0[32] - bf0[35];
+  bf1[36] = -bf0[36] + bf0[39];
+  bf1[37] = -bf0[37] + bf0[38];
+  bf1[38] = bf0[37] + bf0[38];
+  bf1[39] = bf0[36] + bf0[39];
+  bf1[40] = bf0[40] + bf0[43];
+  bf1[41] = bf0[41] + bf0[42];
+  bf1[42] = bf0[41] - bf0[42];
+  bf1[43] = bf0[40] - bf0[43];
+  bf1[44] = -bf0[44] + bf0[47];
+  bf1[45] = -bf0[45] + bf0[46];
+  bf1[46] = bf0[45] + bf0[46];
+  bf1[47] = bf0[44] + bf0[47];
+  bf1[48] = bf0[48] + bf0[51];
+  bf1[49] = bf0[49] + bf0[50];
+  bf1[50] = bf0[49] - bf0[50];
+  bf1[51] = bf0[48] - bf0[51];
+  bf1[52] = -bf0[52] + bf0[55];
+  bf1[53] = -bf0[53] + bf0[54];
+  bf1[54] = bf0[53] + bf0[54];
+  bf1[55] = bf0[52] + bf0[55];
+  bf1[56] = bf0[56] + bf0[59];
+  bf1[57] = bf0[57] + bf0[58];
+  bf1[58] = bf0[57] - bf0[58];
+  bf1[59] = bf0[56] - bf0[59];
+  bf1[60] = -bf0[60] + bf0[63];
+  bf1[61] = -bf0[61] + bf0[62];
+  bf1[62] = bf0[61] + bf0[62];
+  bf1[63] = bf0[60] + bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 6
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = output;
+  bf1 = step;
+  bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit[stage]);
+  bf1[1] = half_btf(cospi[32], bf0[0], -cospi[32], bf0[1], cos_bit[stage]);
+  bf1[2] = half_btf(cospi[48], bf0[2], -cospi[16], bf0[3], cos_bit[stage]);
+  bf1[3] = half_btf(cospi[16], bf0[2], cospi[48], bf0[3], cos_bit[stage]);
+  bf1[4] = bf0[4] + bf0[5];
+  bf1[5] = bf0[4] - bf0[5];
+  bf1[6] = -bf0[6] + bf0[7];
+  bf1[7] = bf0[6] + bf0[7];
+  bf1[8] = bf0[8];
+  bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit[stage]);
+  bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit[stage]);
+  bf1[11] = bf0[11];
+  bf1[12] = bf0[12];
+  bf1[13] = half_btf(-cospi[16], bf0[10], cospi[48], bf0[13], cos_bit[stage]);
+  bf1[14] = half_btf(cospi[48], bf0[9], cospi[16], bf0[14], cos_bit[stage]);
+  bf1[15] = bf0[15];
+  bf1[16] = bf0[16] + bf0[19];
+  bf1[17] = bf0[17] + bf0[18];
+  bf1[18] = bf0[17] - bf0[18];
+  bf1[19] = bf0[16] - bf0[19];
+  bf1[20] = -bf0[20] + bf0[23];
+  bf1[21] = -bf0[21] + bf0[22];
+  bf1[22] = bf0[21] + bf0[22];
+  bf1[23] = bf0[20] + bf0[23];
+  bf1[24] = bf0[24] + bf0[27];
+  bf1[25] = bf0[25] + bf0[26];
+  bf1[26] = bf0[25] - bf0[26];
+  bf1[27] = bf0[24] - bf0[27];
+  bf1[28] = -bf0[28] + bf0[31];
+  bf1[29] = -bf0[29] + bf0[30];
+  bf1[30] = bf0[29] + bf0[30];
+  bf1[31] = bf0[28] + bf0[31];
+  bf1[32] = bf0[32];
+  bf1[33] = bf0[33];
+  bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit[stage]);
+  bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit[stage]);
+  bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit[stage]);
+  bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit[stage]);
+  bf1[38] = bf0[38];
+  bf1[39] = bf0[39];
+  bf1[40] = bf0[40];
+  bf1[41] = bf0[41];
+  bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit[stage]);
+  bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit[stage]);
+  bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit[stage]);
+  bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit[stage]);
+  bf1[46] = bf0[46];
+  bf1[47] = bf0[47];
+  bf1[48] = bf0[48];
+  bf1[49] = bf0[49];
+  bf1[50] = half_btf(-cospi[40], bf0[45], cospi[24], bf0[50], cos_bit[stage]);
+  bf1[51] = half_btf(-cospi[40], bf0[44], cospi[24], bf0[51], cos_bit[stage]);
+  bf1[52] = half_btf(cospi[24], bf0[43], cospi[40], bf0[52], cos_bit[stage]);
+  bf1[53] = half_btf(cospi[24], bf0[42], cospi[40], bf0[53], cos_bit[stage]);
+  bf1[54] = bf0[54];
+  bf1[55] = bf0[55];
+  bf1[56] = bf0[56];
+  bf1[57] = bf0[57];
+  bf1[58] = half_btf(-cospi[8], bf0[37], cospi[56], bf0[58], cos_bit[stage]);
+  bf1[59] = half_btf(-cospi[8], bf0[36], cospi[56], bf0[59], cos_bit[stage]);
+  bf1[60] = half_btf(cospi[56], bf0[35], cospi[8], bf0[60], cos_bit[stage]);
+  bf1[61] = half_btf(cospi[56], bf0[34], cospi[8], bf0[61], cos_bit[stage]);
+  bf1[62] = bf0[62];
+  bf1[63] = bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 7
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = step;
+  bf1 = output;
+  bf1[0] = bf0[0] + bf0[3];
+  bf1[1] = bf0[1] + bf0[2];
+  bf1[2] = bf0[1] - bf0[2];
+  bf1[3] = bf0[0] - bf0[3];
+  bf1[4] = bf0[4];
+  bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]);
+  bf1[6] = half_btf(cospi[32], bf0[5], cospi[32], bf0[6], cos_bit[stage]);
+  bf1[7] = bf0[7];
+  bf1[8] = bf0[8] + bf0[11];
+  bf1[9] = bf0[9] + bf0[10];
+  bf1[10] = bf0[9] - bf0[10];
+  bf1[11] = bf0[8] - bf0[11];
+  bf1[12] = -bf0[12] + bf0[15];
+  bf1[13] = -bf0[13] + bf0[14];
+  bf1[14] = bf0[13] + bf0[14];
+  bf1[15] = bf0[12] + bf0[15];
+  bf1[16] = bf0[16];
+  bf1[17] = bf0[17];
+  bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit[stage]);
+  bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit[stage]);
+  bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit[stage]);
+  bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit[stage]);
+  bf1[22] = bf0[22];
+  bf1[23] = bf0[23];
+  bf1[24] = bf0[24];
+  bf1[25] = bf0[25];
+  bf1[26] = half_btf(-cospi[16], bf0[21], cospi[48], bf0[26], cos_bit[stage]);
+  bf1[27] = half_btf(-cospi[16], bf0[20], cospi[48], bf0[27], cos_bit[stage]);
+  bf1[28] = half_btf(cospi[48], bf0[19], cospi[16], bf0[28], cos_bit[stage]);
+  bf1[29] = half_btf(cospi[48], bf0[18], cospi[16], bf0[29], cos_bit[stage]);
+  bf1[30] = bf0[30];
+  bf1[31] = bf0[31];
+  bf1[32] = bf0[32] + bf0[39];
+  bf1[33] = bf0[33] + bf0[38];
+  bf1[34] = bf0[34] + bf0[37];
+  bf1[35] = bf0[35] + bf0[36];
+  bf1[36] = bf0[35] - bf0[36];
+  bf1[37] = bf0[34] - bf0[37];
+  bf1[38] = bf0[33] - bf0[38];
+  bf1[39] = bf0[32] - bf0[39];
+  bf1[40] = -bf0[40] + bf0[47];
+  bf1[41] = -bf0[41] + bf0[46];
+  bf1[42] = -bf0[42] + bf0[45];
+  bf1[43] = -bf0[43] + bf0[44];
+  bf1[44] = bf0[43] + bf0[44];
+  bf1[45] = bf0[42] + bf0[45];
+  bf1[46] = bf0[41] + bf0[46];
+  bf1[47] = bf0[40] + bf0[47];
+  bf1[48] = bf0[48] + bf0[55];
+  bf1[49] = bf0[49] + bf0[54];
+  bf1[50] = bf0[50] + bf0[53];
+  bf1[51] = bf0[51] + bf0[52];
+  bf1[52] = bf0[51] - bf0[52];
+  bf1[53] = bf0[50] - bf0[53];
+  bf1[54] = bf0[49] - bf0[54];
+  bf1[55] = bf0[48] - bf0[55];
+  bf1[56] = -bf0[56] + bf0[63];
+  bf1[57] = -bf0[57] + bf0[62];
+  bf1[58] = -bf0[58] + bf0[61];
+  bf1[59] = -bf0[59] + bf0[60];
+  bf1[60] = bf0[59] + bf0[60];
+  bf1[61] = bf0[58] + bf0[61];
+  bf1[62] = bf0[57] + bf0[62];
+  bf1[63] = bf0[56] + bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 8
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = output;
+  bf1 = step;
+  bf1[0] = bf0[0] + bf0[7];
+  bf1[1] = bf0[1] + bf0[6];
+  bf1[2] = bf0[2] + bf0[5];
+  bf1[3] = bf0[3] + bf0[4];
+  bf1[4] = bf0[3] - bf0[4];
+  bf1[5] = bf0[2] - bf0[5];
+  bf1[6] = bf0[1] - bf0[6];
+  bf1[7] = bf0[0] - bf0[7];
+  bf1[8] = bf0[8];
+  bf1[9] = bf0[9];
+  bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]);
+  bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]);
+  bf1[12] = half_btf(cospi[32], bf0[11], cospi[32], bf0[12], cos_bit[stage]);
+  bf1[13] = half_btf(cospi[32], bf0[10], cospi[32], bf0[13], cos_bit[stage]);
+  bf1[14] = bf0[14];
+  bf1[15] = bf0[15];
+  bf1[16] = bf0[16] + bf0[23];
+  bf1[17] = bf0[17] + bf0[22];
+  bf1[18] = bf0[18] + bf0[21];
+  bf1[19] = bf0[19] + bf0[20];
+  bf1[20] = bf0[19] - bf0[20];
+  bf1[21] = bf0[18] - bf0[21];
+  bf1[22] = bf0[17] - bf0[22];
+  bf1[23] = bf0[16] - bf0[23];
+  bf1[24] = -bf0[24] + bf0[31];
+  bf1[25] = -bf0[25] + bf0[30];
+  bf1[26] = -bf0[26] + bf0[29];
+  bf1[27] = -bf0[27] + bf0[28];
+  bf1[28] = bf0[27] + bf0[28];
+  bf1[29] = bf0[26] + bf0[29];
+  bf1[30] = bf0[25] + bf0[30];
+  bf1[31] = bf0[24] + bf0[31];
+  bf1[32] = bf0[32];
+  bf1[33] = bf0[33];
+  bf1[34] = bf0[34];
+  bf1[35] = bf0[35];
+  bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit[stage]);
+  bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit[stage]);
+  bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit[stage]);
+  bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit[stage]);
+  bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit[stage]);
+  bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit[stage]);
+  bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit[stage]);
+  bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit[stage]);
+  bf1[44] = bf0[44];
+  bf1[45] = bf0[45];
+  bf1[46] = bf0[46];
+  bf1[47] = bf0[47];
+  bf1[48] = bf0[48];
+  bf1[49] = bf0[49];
+  bf1[50] = bf0[50];
+  bf1[51] = bf0[51];
+  bf1[52] = half_btf(-cospi[16], bf0[43], cospi[48], bf0[52], cos_bit[stage]);
+  bf1[53] = half_btf(-cospi[16], bf0[42], cospi[48], bf0[53], cos_bit[stage]);
+  bf1[54] = half_btf(-cospi[16], bf0[41], cospi[48], bf0[54], cos_bit[stage]);
+  bf1[55] = half_btf(-cospi[16], bf0[40], cospi[48], bf0[55], cos_bit[stage]);
+  bf1[56] = half_btf(cospi[48], bf0[39], cospi[16], bf0[56], cos_bit[stage]);
+  bf1[57] = half_btf(cospi[48], bf0[38], cospi[16], bf0[57], cos_bit[stage]);
+  bf1[58] = half_btf(cospi[48], bf0[37], cospi[16], bf0[58], cos_bit[stage]);
+  bf1[59] = half_btf(cospi[48], bf0[36], cospi[16], bf0[59], cos_bit[stage]);
+  bf1[60] = bf0[60];
+  bf1[61] = bf0[61];
+  bf1[62] = bf0[62];
+  bf1[63] = bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 9
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = step;
+  bf1 = output;
+  bf1[0] = bf0[0] + bf0[15];
+  bf1[1] = bf0[1] + bf0[14];
+  bf1[2] = bf0[2] + bf0[13];
+  bf1[3] = bf0[3] + bf0[12];
+  bf1[4] = bf0[4] + bf0[11];
+  bf1[5] = bf0[5] + bf0[10];
+  bf1[6] = bf0[6] + bf0[9];
+  bf1[7] = bf0[7] + bf0[8];
+  bf1[8] = bf0[7] - bf0[8];
+  bf1[9] = bf0[6] - bf0[9];
+  bf1[10] = bf0[5] - bf0[10];
+  bf1[11] = bf0[4] - bf0[11];
+  bf1[12] = bf0[3] - bf0[12];
+  bf1[13] = bf0[2] - bf0[13];
+  bf1[14] = bf0[1] - bf0[14];
+  bf1[15] = bf0[0] - bf0[15];
+  bf1[16] = bf0[16];
+  bf1[17] = bf0[17];
+  bf1[18] = bf0[18];
+  bf1[19] = bf0[19];
+  bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]);
+  bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]);
+  bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]);
+  bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]);
+  bf1[24] = half_btf(cospi[32], bf0[23], cospi[32], bf0[24], cos_bit[stage]);
+  bf1[25] = half_btf(cospi[32], bf0[22], cospi[32], bf0[25], cos_bit[stage]);
+  bf1[26] = half_btf(cospi[32], bf0[21], cospi[32], bf0[26], cos_bit[stage]);
+  bf1[27] = half_btf(cospi[32], bf0[20], cospi[32], bf0[27], cos_bit[stage]);
+  bf1[28] = bf0[28];
+  bf1[29] = bf0[29];
+  bf1[30] = bf0[30];
+  bf1[31] = bf0[31];
+  bf1[32] = bf0[32] + bf0[47];
+  bf1[33] = bf0[33] + bf0[46];
+  bf1[34] = bf0[34] + bf0[45];
+  bf1[35] = bf0[35] + bf0[44];
+  bf1[36] = bf0[36] + bf0[43];
+  bf1[37] = bf0[37] + bf0[42];
+  bf1[38] = bf0[38] + bf0[41];
+  bf1[39] = bf0[39] + bf0[40];
+  bf1[40] = bf0[39] - bf0[40];
+  bf1[41] = bf0[38] - bf0[41];
+  bf1[42] = bf0[37] - bf0[42];
+  bf1[43] = bf0[36] - bf0[43];
+  bf1[44] = bf0[35] - bf0[44];
+  bf1[45] = bf0[34] - bf0[45];
+  bf1[46] = bf0[33] - bf0[46];
+  bf1[47] = bf0[32] - bf0[47];
+  bf1[48] = -bf0[48] + bf0[63];
+  bf1[49] = -bf0[49] + bf0[62];
+  bf1[50] = -bf0[50] + bf0[61];
+  bf1[51] = -bf0[51] + bf0[60];
+  bf1[52] = -bf0[52] + bf0[59];
+  bf1[53] = -bf0[53] + bf0[58];
+  bf1[54] = -bf0[54] + bf0[57];
+  bf1[55] = -bf0[55] + bf0[56];
+  bf1[56] = bf0[55] + bf0[56];
+  bf1[57] = bf0[54] + bf0[57];
+  bf1[58] = bf0[53] + bf0[58];
+  bf1[59] = bf0[52] + bf0[59];
+  bf1[60] = bf0[51] + bf0[60];
+  bf1[61] = bf0[50] + bf0[61];
+  bf1[62] = bf0[49] + bf0[62];
+  bf1[63] = bf0[48] + bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 10
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = output;
+  bf1 = step;
+  bf1[0] = bf0[0] + bf0[31];
+  bf1[1] = bf0[1] + bf0[30];
+  bf1[2] = bf0[2] + bf0[29];
+  bf1[3] = bf0[3] + bf0[28];
+  bf1[4] = bf0[4] + bf0[27];
+  bf1[5] = bf0[5] + bf0[26];
+  bf1[6] = bf0[6] + bf0[25];
+  bf1[7] = bf0[7] + bf0[24];
+  bf1[8] = bf0[8] + bf0[23];
+  bf1[9] = bf0[9] + bf0[22];
+  bf1[10] = bf0[10] + bf0[21];
+  bf1[11] = bf0[11] + bf0[20];
+  bf1[12] = bf0[12] + bf0[19];
+  bf1[13] = bf0[13] + bf0[18];
+  bf1[14] = bf0[14] + bf0[17];
+  bf1[15] = bf0[15] + bf0[16];
+  bf1[16] = bf0[15] - bf0[16];
+  bf1[17] = bf0[14] - bf0[17];
+  bf1[18] = bf0[13] - bf0[18];
+  bf1[19] = bf0[12] - bf0[19];
+  bf1[20] = bf0[11] - bf0[20];
+  bf1[21] = bf0[10] - bf0[21];
+  bf1[22] = bf0[9] - bf0[22];
+  bf1[23] = bf0[8] - bf0[23];
+  bf1[24] = bf0[7] - bf0[24];
+  bf1[25] = bf0[6] - bf0[25];
+  bf1[26] = bf0[5] - bf0[26];
+  bf1[27] = bf0[4] - bf0[27];
+  bf1[28] = bf0[3] - bf0[28];
+  bf1[29] = bf0[2] - bf0[29];
+  bf1[30] = bf0[1] - bf0[30];
+  bf1[31] = bf0[0] - bf0[31];
+  bf1[32] = bf0[32];
+  bf1[33] = bf0[33];
+  bf1[34] = bf0[34];
+  bf1[35] = bf0[35];
+  bf1[36] = bf0[36];
+  bf1[37] = bf0[37];
+  bf1[38] = bf0[38];
+  bf1[39] = bf0[39];
+  bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]);
+  bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]);
+  bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]);
+  bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]);
+  bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]);
+  bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]);
+  bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]);
+  bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]);
+  bf1[48] = half_btf(cospi[32], bf0[47], cospi[32], bf0[48], cos_bit[stage]);
+  bf1[49] = half_btf(cospi[32], bf0[46], cospi[32], bf0[49], cos_bit[stage]);
+  bf1[50] = half_btf(cospi[32], bf0[45], cospi[32], bf0[50], cos_bit[stage]);
+  bf1[51] = half_btf(cospi[32], bf0[44], cospi[32], bf0[51], cos_bit[stage]);
+  bf1[52] = half_btf(cospi[32], bf0[43], cospi[32], bf0[52], cos_bit[stage]);
+  bf1[53] = half_btf(cospi[32], bf0[42], cospi[32], bf0[53], cos_bit[stage]);
+  bf1[54] = half_btf(cospi[32], bf0[41], cospi[32], bf0[54], cos_bit[stage]);
+  bf1[55] = half_btf(cospi[32], bf0[40], cospi[32], bf0[55], cos_bit[stage]);
+  bf1[56] = bf0[56];
+  bf1[57] = bf0[57];
+  bf1[58] = bf0[58];
+  bf1[59] = bf0[59];
+  bf1[60] = bf0[60];
+  bf1[61] = bf0[61];
+  bf1[62] = bf0[62];
+  bf1[63] = bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+
+  // stage 11
+  stage++;
+  cospi = cospi_arr[cos_bit[stage] - cos_bit_min];
+  bf0 = step;
+  bf1 = output;
+  bf1[0] = bf0[0] + bf0[63];
+  bf1[1] = bf0[1] + bf0[62];
+  bf1[2] = bf0[2] + bf0[61];
+  bf1[3] = bf0[3] + bf0[60];
+  bf1[4] = bf0[4] + bf0[59];
+  bf1[5] = bf0[5] + bf0[58];
+  bf1[6] = bf0[6] + bf0[57];
+  bf1[7] = bf0[7] + bf0[56];
+  bf1[8] = bf0[8] + bf0[55];
+  bf1[9] = bf0[9] + bf0[54];
+  bf1[10] = bf0[10] + bf0[53];
+  bf1[11] = bf0[11] + bf0[52];
+  bf1[12] = bf0[12] + bf0[51];
+  bf1[13] = bf0[13] + bf0[50];
+  bf1[14] = bf0[14] + bf0[49];
+  bf1[15] = bf0[15] + bf0[48];
+  bf1[16] = bf0[16] + bf0[47];
+  bf1[17] = bf0[17] + bf0[46];
+  bf1[18] = bf0[18] + bf0[45];
+  bf1[19] = bf0[19] + bf0[44];
+  bf1[20] = bf0[20] + bf0[43];
+  bf1[21] = bf0[21] + bf0[42];
+  bf1[22] = bf0[22] + bf0[41];
+  bf1[23] = bf0[23] + bf0[40];
+  bf1[24] = bf0[24] + bf0[39];
+  bf1[25] = bf0[25] + bf0[38];
+  bf1[26] = bf0[26] + bf0[37];
+  bf1[27] = bf0[27] + bf0[36];
+  bf1[28] = bf0[28] + bf0[35];
+  bf1[29] = bf0[29] + bf0[34];
+  bf1[30] = bf0[30] + bf0[33];
+  bf1[31] = bf0[31] + bf0[32];
+  bf1[32] = bf0[31] - bf0[32];
+  bf1[33] = bf0[30] - bf0[33];
+  bf1[34] = bf0[29] - bf0[34];
+  bf1[35] = bf0[28] - bf0[35];
+  bf1[36] = bf0[27] - bf0[36];
+  bf1[37] = bf0[26] - bf0[37];
+  bf1[38] = bf0[25] - bf0[38];
+  bf1[39] = bf0[24] - bf0[39];
+  bf1[40] = bf0[23] - bf0[40];
+  bf1[41] = bf0[22] - bf0[41];
+  bf1[42] = bf0[21] - bf0[42];
+  bf1[43] = bf0[20] - bf0[43];
+  bf1[44] = bf0[19] - bf0[44];
+  bf1[45] = bf0[18] - bf0[45];
+  bf1[46] = bf0[17] - bf0[46];
+  bf1[47] = bf0[16] - bf0[47];
+  bf1[48] = bf0[15] - bf0[48];
+  bf1[49] = bf0[14] - bf0[49];
+  bf1[50] = bf0[13] - bf0[50];
+  bf1[51] = bf0[12] - bf0[51];
+  bf1[52] = bf0[11] - bf0[52];
+  bf1[53] = bf0[10] - bf0[53];
+  bf1[54] = bf0[9] - bf0[54];
+  bf1[55] = bf0[8] - bf0[55];
+  bf1[56] = bf0[7] - bf0[56];
+  bf1[57] = bf0[6] - bf0[57];
+  bf1[58] = bf0[5] - bf0[58];
+  bf1[59] = bf0[4] - bf0[59];
+  bf1[60] = bf0[3] - bf0[60];
+  bf1[61] = bf0[2] - bf0[61];
+  bf1[62] = bf0[1] - bf0[62];
+  bf1[63] = bf0[0] - bf0[63];
+  range_check(stage, input, bf1, size, stage_range[stage]);
+}
diff --git a/vp10/common/vp10_inv_txfm1d.h b/vp10/common/vp10_inv_txfm1d.h
index 0609b65e3e102a18a8ad136e7bedf32b92fc5b8f..fd547a6822ad5280747877fd6348d2bd2dc4613c 100644
--- a/vp10/common/vp10_inv_txfm1d.h
+++ b/vp10/common/vp10_inv_txfm1d.h
@@ -25,6 +25,8 @@ void vp10_idct16_new(const int32_t *input, int32_t *output,
                      const int8_t *cos_bit, const int8_t *stage_range);
 void vp10_idct32_new(const int32_t *input, int32_t *output,
                      const int8_t *cos_bit, const int8_t *stage_range);
+void vp10_idct64_new(const int32_t *input, int32_t *output,
+                     const int8_t *cos_bit, const int8_t *stage_range);
 
 void vp10_iadst4_new(const int32_t *input, int32_t *output,
                      const int8_t *cos_bit, const int8_t *stage_range);
diff --git a/vp10/common/vp10_inv_txfm2d.c b/vp10/common/vp10_inv_txfm2d.c
index c894a42b20f54fd43df1c849020a870e08f3a138..cacbd0becca689c393f960496a1eb73f94125f1b 100644
--- a/vp10/common/vp10_inv_txfm2d.c
+++ b/vp10/common/vp10_inv_txfm2d.c
@@ -96,3 +96,15 @@ void vp10_inv_txfm2d_add_32x32(const int32_t *input, uint16_t *output,
   inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
   clamp_block((int16_t *)output, 32, stride, 0, (1 << bd) - 1);
 }
+
+void vp10_inv_txfm2d_add_64x64(const int32_t *input, uint16_t *output,
+                               const int stride, const TXFM_2D_CFG *cfg,
+                               const int bd) {
+  int txfm_buf[64 * 64 + 64 + 64];
+  // output contains the prediction signal which is always positive and smaller
+  // than (1 << bd) - 1
+  // since bd < 16-1, therefore we can treat the uint16_t* output buffer as an
+  // int16_t*
+  inv_txfm2d_add_c(input, (int16_t *)output, stride, cfg, txfm_buf);
+  clamp_block((int16_t *)output, 64, stride, 0, (1 << bd) - 1);
+}
diff --git a/vp10/common/vp10_inv_txfm2d.h b/vp10/common/vp10_inv_txfm2d.h
index 1b570efcd464947455cb538283c294396827b84b..fd4b23c19c04895e8c5a7e8f5e7c2ed3d49464e5 100644
--- a/vp10/common/vp10_inv_txfm2d.h
+++ b/vp10/common/vp10_inv_txfm2d.h
@@ -27,6 +27,9 @@ void vp10_inv_txfm2d_add_16x16(const int32_t *input, uint16_t *output,
                                const int stride, const TXFM_2D_CFG *cfg,
                                const int bd);
 void vp10_inv_txfm2d_add_32x32(const int32_t *input, uint16_t *output,
                                const int stride, const TXFM_2D_CFG *cfg,
                                const int bd);
+void vp10_inv_txfm2d_add_64x64(const int32_t *input, uint16_t *output,
+                               const int stride, const TXFM_2D_CFG *cfg,
+                               const int bd);
 #ifdef __cplusplus
 }
 #endif
diff --git a/vp10/common/vp10_inv_txfm2d_cfg.h b/vp10/common/vp10_inv_txfm2d_cfg.h
index fc552fe495c634b3fa895cfddd2ac4777b3d116c..a1c48920e13cb7ca7b0b677cf2b91a8906439697 100644
--- a/vp10/common/vp10_inv_txfm2d_cfg.h
+++ b/vp10/common/vp10_inv_txfm2d_cfg.h
@@ -96,6 +96,29 @@ static const TXFM_2D_CFG inv_txfm_2d_cfg_dct_dct_32 = {
     vp10_idct32_new,   // .txfm_func_col
     vp10_idct32_new};  // .txfm_func_row;
 
+// ---------------- config inv_dct_dct_64 ----------------
+static int8_t inv_shift_dct_dct_64[2] = {-1, -7};
+static int8_t inv_stage_range_col_dct_dct_64[12] = {19, 19, 19, 19, 19, 19,
+                                                    19, 19, 19, 19, 18, 18};
+static int8_t inv_stage_range_row_dct_dct_64[12] = {20, 20, 20, 20, 20, 20,
+                                                    20, 20, 20, 20, 20, 20};
+static int8_t inv_cos_bit_col_dct_dct_64[12] = {13, 13, 13, 13, 13, 13,
+                                                13, 13, 13, 13, 13, 14};
+static int8_t inv_cos_bit_row_dct_dct_64[12] = {12, 12, 12, 12, 12, 12,
+                                                12, 12, 12, 12, 12, 12};
+
+static const TXFM_2D_CFG inv_txfm_2d_cfg_dct_dct_64 = {
+    64,  // .txfm_size
+    12,  // .stage_num_col
+    12,  // .stage_num_row
+    inv_shift_dct_dct_64,            // .shift
+    inv_stage_range_col_dct_dct_64,  // .stage_range_col
+    inv_stage_range_row_dct_dct_64,  // .stage_range_row
+    inv_cos_bit_col_dct_dct_64,      // .cos_bit_col
+    inv_cos_bit_row_dct_dct_64,      // .cos_bit_row
+    vp10_idct64_new,   // .txfm_func_col
+    vp10_idct64_new};  // .txfm_func_row;
+
 // ---------------- config inv_dct_adst_4 ----------------
 static const int8_t inv_shift_dct_adst_4[2] = {1, -5};
 static const int8_t inv_stage_range_col_dct_adst_4[4] = {17, 17, 16, 16};