Commit 5859636f authored by Sebastien Alaiwan

Remove DAALA_TX experiment

This experiment has been abandoned for AV1.

Change-Id: Ief8ed6a51a5e7bac17838ebb7a88d88bbf90a96f
parent 3da65bff
......@@ -28,13 +28,6 @@ static INLINE tran_high_t dct_const_round_shift(tran_high_t input) {
}
static INLINE tran_high_t check_range(tran_high_t input, int bd) {
#if CONFIG_DAALA_TX
// Daala TX coeffs cover a different range from AV1 TX
// all depths: 19 bit integer
const int32_t int_max = (1 << (TX_COEFF_DEPTH + 6)) - 1;
const int32_t int_min = -int_max - 1;
(void)bd;
#else
// AV1 TX case
// - 8 bit: signed 16 bit integer
// - 10 bit: signed 18 bit integer
......@@ -42,7 +35,6 @@ static INLINE tran_high_t check_range(tran_high_t input, int bd) {
// - max quantization error = 1828 << (bd - 8)
const int32_t int_max = (1 << (7 + bd)) - 1 + (914 << (bd - 7));
const int32_t int_min = -int_max - 1;
#endif
#if CONFIG_COEFFICIENT_RANGE_CHECKING
assert(int_min <= input);
assert(input <= int_max);
......@@ -57,9 +49,6 @@ void aom_idct4_c(const tran_low_t *input, tran_low_t *output);
void aom_idct8_c(const tran_low_t *input, tran_low_t *output);
void aom_idct16_c(const tran_low_t *input, tran_low_t *output);
void aom_idct32_c(const tran_low_t *input, tran_low_t *output);
#if CONFIG_TX64X64 && CONFIG_DAALA_TX64
void aom_idct64_c(const tran_low_t *input, tran_low_t *output);
#endif
void aom_iadst4_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst8_c(const tran_low_t *input, tran_low_t *output);
void aom_iadst16_c(const tran_low_t *input, tran_low_t *output);
......
......@@ -70,16 +70,6 @@ set(AOM_AV1_COMMON_SOURCES
"${AOM_ROOT}/av1/common/tile_common.c"
"${AOM_ROOT}/av1/common/tile_common.h")
if (CONFIG_DAALA_TX)
set(AOM_AV1_COMMON_SOURCES
${AOM_AV1_COMMON_SOURCES}
"${AOM_ROOT}/av1/common/daala_tx.c"
"${AOM_ROOT}/av1/common/daala_tx.h"
"${AOM_ROOT}/av1/common/daala_tx_kernels.h"
"${AOM_ROOT}/av1/common/daala_inv_txfm.c"
"${AOM_ROOT}/av1/common/daala_inv_txfm.h")
endif ()
set(AOM_AV1_DECODER_SOURCES
"${AOM_ROOT}/av1/av1_dx_iface.c"
"${AOM_ROOT}/av1/decoder/decodeframe.c"
......@@ -165,13 +155,6 @@ set(AOM_AV1_ENCODER_SOURCES
"${AOM_ROOT}/av1/encoder/tokenize.c"
"${AOM_ROOT}/av1/encoder/tokenize.h")
if (CONFIG_DAALA_TX)
set(AOM_AV1_ENCODER_SOURCES
${AOM_AV1_ENCODER_SOURCES}
"${AOM_ROOT}/av1/encoder/daala_fwd_txfm.c"
"${AOM_ROOT}/av1/encoder/daala_fwd_txfm.h")
endif ()
set(AOM_AV1_COMMON_INTRIN_SSE2
"${AOM_ROOT}/av1/common/x86/idct_intrin_sse2.c")
......@@ -186,12 +169,6 @@ set(AOM_AV1_COMMON_INTRIN_SSE4_1
set(AOM_AV1_COMMON_INTRIN_AVX2
"${AOM_ROOT}/av1/common/x86/highbd_inv_txfm_avx2.c"
"${AOM_ROOT}/av1/common/x86/hybrid_inv_txfm_avx2.c")
if (CONFIG_DAALA_TX)
set(AOM_AV1_COMMON_INTRIN_AVX2
${AOM_AV1_COMMON_INTRIN_AVX2}
"${AOM_ROOT}/av1/common/x86/daala_tx_kernels.h"
"${AOM_ROOT}/av1/common/x86/daala_inv_txfm_avx2.c")
endif ()
set(AOM_AV1_COMMON_INTRIN_MSA
"${AOM_ROOT}/av1/common/mips/msa/av1_idct16x16_msa.c"
......
......@@ -28,10 +28,6 @@ print <<EOF
#include "av1/common/restoration.h"
#endif
#if CONFIG_DAALA_TX
#include "av1/common/daala_inv_txfm.h"
#endif
#if CONFIG_CFL
#include "av1/common/cfl.h"
#endif
......@@ -95,11 +91,11 @@ if (aom_config("CONFIG_LV_MAP") eq "yes") {
# Inverse dct
#
add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
{
specialize qw/av1_iht4x4_16_add sse2/;
}
if (aom_config("CONFIG_DAALA_TX") ne "yes") {
{
add_proto qw/void av1_iht4x8_32_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
specialize qw/av1_iht4x8_32_add sse2/;
......@@ -127,13 +123,13 @@ if (aom_config("CONFIG_DAALA_TX") ne "yes") {
add_proto qw/void av1_iht32x8_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, const struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
{
specialize qw/av1_iht8x8_64_add sse2/;
}
add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
{
specialize qw/av1_iht16x16_256_add sse2 avx2/;
}
......@@ -247,19 +243,19 @@ add_proto qw/void av1_inv_txfm2d_add_16x8/, "const int32_t *input, uint16_t *out
add_proto qw/void av1_inv_txfm2d_add_16x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_32x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
{
specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
}
add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
{
specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
}
add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
{
specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
}
add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX32") ne "yes") {
{
specialize qw/av1_inv_txfm2d_add_32x32 avx2/;
}
if (aom_config("CONFIG_TX64X64") eq "yes") {
......@@ -300,24 +296,24 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
# fdct functions
add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
{
specialize qw/av1_fht4x4 sse2/;
}
add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
{
specialize qw/av1_fht8x8 sse2/;
}
add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
{
specialize qw/av1_fht16x16 sse2 avx2/;
}
add_proto qw/void av1_fht32x32/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
if (aom_config("CONFIG_DAALA_TX32") ne "yes") {
{
specialize qw/av1_fht32x32 sse2 avx2/;
}
......@@ -370,19 +366,19 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/void av1_fwd_txfm2d_8x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_32x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX4") ne "yes") {
{
specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
}
add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX8") ne "yes") {
{
specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
}
add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX16") ne "yes") {
{
specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
}
add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
if (aom_config("CONFIG_DAALA_TX32") ne "yes") {
{
specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
}
......@@ -433,7 +429,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
}
add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
if (aom_config("CONFIG_DAALA_TX") ne "yes") {
{
specialize qw/av1_highbd_block_error sse2/;
}
......@@ -579,12 +575,6 @@ if (aom_config("CONFIG_INTRA_EDGE") eq "yes") {
}
# DAALA_TX functions
if (aom_config("CONFIG_DAALA_TX") eq "yes") {
add_proto qw/void daala_inv_txfm_add/, "const tran_low_t *input_coeffs, void *output_pixels, int output_stride, TxfmParam *txfm_param";
specialize qw/daala_inv_txfm_add avx2/;
}
# CFL
if (aom_config("CONFIG_CFL") eq "yes") {
add_proto qw/void av1_cfl_subtract/, "int16_t *pred_buf_q3, int width, int height, int16_t avg_q3";
......
......@@ -765,16 +765,6 @@ static INLINE TxSetType get_ext_tx_set_type(TX_SIZE tx_size, BLOCK_SIZE bs,
#endif // USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4
if (use_reduced_set)
return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DTT4_IDTX;
#if CONFIG_DAALA_TX_DST32
if (tx_size_sqr_up > TX_32X32)
return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DCTONLY;
if (is_inter)
return (tx_size_sqr >= TX_16X16 ? EXT_TX_SET_DTT9_IDTX_1DDCT
: EXT_TX_SET_ALL16);
else
return (tx_size_sqr >= TX_16X16 ? EXT_TX_SET_DTT4_IDTX
: EXT_TX_SET_DTT4_IDTX_1DDCT);
#endif
if (tx_size_sqr_up == TX_32X32)
return is_inter ? EXT_TX_SET_DCT_IDTX : EXT_TX_SET_DCTONLY;
if (is_inter)
......@@ -970,12 +960,8 @@ static INLINE TX_TYPE av1_get_tx_type(PLANE_TYPE plane_type,
if (is_inter_block(mbmi) && !av1_ext_tx_used[tx_set_type][mbmi->tx_type])
return DCT_DCT;
#if CONFIG_DAALA_TX_DST32
if (xd->lossless[mbmi->segment_id] || txsize_sqr_map[tx_size] > TX_32X32)
#else
if (xd->lossless[mbmi->segment_id] || txsize_sqr_map[tx_size] > TX_32X32 ||
(txsize_sqr_map[tx_size] >= TX_32X32 && !is_inter_block(mbmi)))
#endif
return DCT_DCT;
if (plane_type == PLANE_TYPE_Y) {
return mbmi->tx_type;
......@@ -1345,7 +1331,7 @@ static INLINE int av1_get_max_eob(TX_SIZE tx_size) {
if (tx_size == TX_16X64 || tx_size == TX_64X16) {
return 512;
}
#endif // CONFIG_TX64X64 && !CONFIG_DAALA_TX
#endif // CONFIG_TX64X64
return tx_size_2d[tx_size];
}
......
/*
* Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include "./av1_rtcd.h"
#include "./aom_config.h"
#include "./aom_dsp_rtcd.h"
#include "av1/common/daala_tx.h"
#include "av1/common/daala_inv_txfm.h"
#include "av1/common/idct.h"
#if CONFIG_DAALA_TX
// Complete Daala TX map, sans lossless which is special cased
// Rows are indexed by square TX_SIZE (4/8/16/32[/64]-point); columns are
// indexed by the 1-D type from vtx_tab/htx_tab. Presumably the column
// order is DCT, ADST, FLIPADST, IDTX (ADST and FLIPADST share one DST
// kernel, with the flip handled by tx_flip) -- verify against enums.h.
// NOTE(review): the second dimension is declared TX_TYPES but only 4
// columns (TX_TYPES_1D) are ever initialized or indexed; consider
// tightening the declaration to TX_TYPES_1D.
typedef void (*daala_itx)(od_coeff *, int, const od_coeff[]);

static daala_itx tx_map[TX_SIZES][TX_TYPES] = {
  // 4-point transforms
  { od_bin_idct4, od_bin_idst4, od_bin_idst4, od_bin_iidtx4 },
  // 8-point transforms
  { od_bin_idct8, od_bin_idst8, od_bin_idst8, od_bin_iidtx8 },
  // 16-point transforms
  { od_bin_idct16, od_bin_idst16, od_bin_idst16, od_bin_iidtx16 },
  // 32-point transforms
  { od_bin_idct32, od_bin_idst32, od_bin_idst32, od_bin_iidtx32 },
#if CONFIG_TX64X64
  // 64-point transforms; no 64-point DST kernel exists, so the DST/FLIPADST
  // columns are NULL (callers assert the looked-up pointer is non-NULL).
  { od_bin_idct64, NULL, NULL, od_bin_iidtx64 },
#endif
};
// A 1-D transform's output ordering must be reversed only for FLIPADST.
static int tx_flip(TX_TYPE_1D t) { return (t == FLIPADST_1D) ? 1 : 0; }
// Daala TX toplevel inverse entry point. This same function is
// intended for both low and high bitdepth cases with a tran_low_t of
// 32 bits (matching od_coeff), and a passed-in pixel buffer of either
// bytes (hbd=0) or shorts (hbd=1).
// Daala TX toplevel inverse entry point. This same function is
// intended for both low and high bitdepth cases with a tran_low_t of
// 32 bits (matching od_coeff), and a passed-in pixel buffer of either
// bytes (hbd=0) or shorts (hbd=1).
//
// input_coeffs:  TX_COEFF_DEPTH-scale coefficients, row-major, cols wide.
// output_pixels: uint8_t* (is_hbd=0) or CONVERT_TO_SHORTPTR'able (is_hbd=1)
//                destination; the inverse transform is summed into it.
// output_stride: destination stride in pixels.
// txfm_param:    carries tx_size, tx_type, bd, lossless, is_hbd, eob.
void daala_inv_txfm_add_c(const tran_low_t *input_coeffs, void *output_pixels,
                          int output_stride, TxfmParam *txfm_param) {
  const TX_SIZE tx_size = txfm_param->tx_size;
  const TX_TYPE tx_type = txfm_param->tx_type;
  const int px_depth = txfm_param->bd;
  // Fix: valid indices are the half-open ranges [0, TX_SIZES_ALL) and
  // [0, TX_TYPES); the previous '<=' asserts allowed an out-of-range value
  // equal to the enum count to slip through.
  assert(tx_size < TX_SIZES_ALL);
  assert(tx_type < TX_TYPES);

  if (txfm_param->lossless) {
    // Transform function special-cased for lossless
    assert(tx_type == DCT_DCT);
    assert(tx_size == TX_4X4);
    if (txfm_param->is_hbd)
      // Note that the output pointer in the prototype is uint8, but the
      // function converts to short internally
      av1_highbd_iwht4x4_add(input_coeffs, output_pixels, output_stride,
                             txfm_param->eob, px_depth);
    else
      av1_iwht4x4_add(input_coeffs, output_pixels, output_stride, txfm_param);
  } else {
    // General TX case: coefficients arrive at constant TX_COEFF_DEPTH
    // precision and are rounded down to the pixel depth at the end.
    const int downshift = TX_COEFF_DEPTH - px_depth;
    assert(downshift >= 0);
    assert(sizeof(tran_low_t) == sizeof(od_coeff));
    assert(sizeof(tran_low_t) >= 4);
    // Hook into existing map translation infrastructure to select
    // appropriate TX functions
    const int cols = tx_size_wide[tx_size];
    const int rows = tx_size_high[tx_size];
    const TX_SIZE col_idx = txsize_vert_map[tx_size];
    const TX_SIZE row_idx = txsize_horz_map[tx_size];
    // Fix: tx_map has TX_SIZES rows and TX_TYPES_1D meaningful columns, so
    // an index equal to the count would be out of bounds; use strict '<'.
    assert(col_idx < TX_SIZES);
    assert(row_idx < TX_SIZES);
    assert(vtx_tab[tx_type] < (int)TX_TYPES_1D);
    assert(htx_tab[tx_type] < (int)TX_TYPES_1D);
    daala_itx col_tx = tx_map[col_idx][vtx_tab[tx_type]];
    daala_itx row_tx = tx_map[row_idx][htx_tab[tx_type]];
    int col_flip = tx_flip(vtx_tab[tx_type]);
    int row_flip = tx_flip(htx_tab[tx_type]);
    od_coeff tmpsq[MAX_TX_SQUARE];
#if CONFIG_TX64X64
    tran_low_t pad_input[MAX_TX_SQUARE];
#endif
    int r;
    int c;

    assert(col_tx);
    assert(row_tx);

#if CONFIG_TX64X64
    if (rows > 32 || cols > 32) {
      int avail_rows;
      int avail_cols;
      // TODO(urvang): Can the same array be reused, instead of using a new
      // array?
      // Remap 32x32 input into a modified input by:
      // - Copying over these values in top-left 32x32 locations.
      // - Setting the rest of the locations to 0.
      avail_rows = AOMMIN(rows, 32);
      avail_cols = AOMMIN(cols, 32);
      for (r = 0; r < avail_rows; r++) {
        memcpy(pad_input + r * cols, input_coeffs + r * avail_cols,
               avail_cols * sizeof(*pad_input));
        if (cols > avail_cols) {
          memset(pad_input + r * cols + avail_cols, 0,
                 (cols - avail_cols) * sizeof(*pad_input));
        }
      }
      if (rows > avail_rows) {
        memset(pad_input + avail_rows * cols, 0,
               (rows - avail_rows) * cols * sizeof(*pad_input));
      }
      input_coeffs = pad_input;
    }
#endif

    // Inverse-transform rows. The output addressing transposes: row r of
    // the input lands in column r of tmpsq (stride 'rows'); a flipped
    // transform writes from the far end with a negative stride.
    for (r = 0; r < rows; ++r) {
      if (row_flip)
        row_tx(tmpsq + r + (rows * cols) - rows, -rows,
               input_coeffs + r * cols);
      else
        row_tx(tmpsq + r, rows, input_coeffs + r * cols);
    }

    // Inverse-transform columns
    for (c = 0; c < cols; ++c) {
      // Above transposed, so our cols are now rows
      if (col_flip)
        col_tx(tmpsq + c * rows + rows - 1, -1, tmpsq + c * rows);
      else
        col_tx(tmpsq + c * rows, 1, tmpsq + c * rows);
    }

    // Sum with destination according to bit depth.
    // The tmpsq array is currently transposed relative to output, hence the
    // tmpsq[c * rows + r] addressing below; (1 << downshift >> 1) is the
    // rounding offset for round-to-nearest on the final downshift.
    if (txfm_param->is_hbd) {
      // Destination array is shorts
      uint16_t *out16 = CONVERT_TO_SHORTPTR(output_pixels);
      for (r = 0; r < rows; ++r)
        for (c = 0; c < cols; ++c)
          out16[r * output_stride + c] = highbd_clip_pixel_add(
              out16[r * output_stride + c],
              (tmpsq[c * rows + r] + (1 << downshift >> 1)) >> downshift,
              px_depth);
    } else {
      // Destination array is bytes
      uint8_t *out8 = (uint8_t *)output_pixels;
      for (r = 0; r < rows; ++r)
        for (c = 0; c < cols; ++c)
          out8[r * output_stride + c] = clip_pixel_add(
              out8[r * output_stride + c],
              (tmpsq[c * rows + r] + (1 << downshift >> 1)) >> downshift);
    }
  }
}
#endif
/*
* Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
// NOTE(review): guard name says AV1_ENCODER_ but this header is included as
// "av1/common/daala_inv_txfm.h"; consider AV1_COMMON_DAALA_INV_TXFM_H_.
#ifndef AV1_ENCODER_DAALA_INV_TXFM_H_
#define AV1_ENCODER_DAALA_INV_TXFM_H_

#include "./aom_config.h"

// NOTE(review): tran_low_t and TxfmParam are used below but not declared in
// this header; callers presumably include the defining headers first --
// verify, or add the needed includes here.

#ifdef __cplusplus
extern "C" {
#endif

// Toplevel Daala inverse transform + add: inverse-transforms input_coeffs
// and sums the result into output_pixels (bytes when txfm_param->is_hbd is
// 0, shorts when 1); output_stride is in pixels.
void daala_inv_txfm_add_c(const tran_low_t *input_coeffs, void *output_pixels,
                          int output_stride, TxfmParam *txfm_param);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // AV1_ENCODER_DAALA_INV_TXFM_H_
This diff is collapsed.
#ifndef AOM_DSP_DAALA_TX_H_
#define AOM_DSP_DAALA_TX_H_

#include "aom_dsp/aom_dsp_common.h"
#include "av1/common/odintrin.h"

// AV1-style contiguous 1-D wrappers over the Daala kernels: f* = forward,
// i* = inverse; dct/dst/idtx name the kernel family and the numeric suffix
// is the transform length.
void daala_fdct4(const tran_low_t *input, tran_low_t *output);
void daala_idct4(const tran_low_t *input, tran_low_t *output);
void daala_fdst4(const tran_low_t *input, tran_low_t *output);
void daala_idst4(const tran_low_t *input, tran_low_t *output);
void daala_idtx4(const tran_low_t *input, tran_low_t *output);
void daala_fdct8(const tran_low_t *input, tran_low_t *output);
void daala_idct8(const tran_low_t *input, tran_low_t *output);
void daala_fdst8(const tran_low_t *input, tran_low_t *output);
void daala_idst8(const tran_low_t *input, tran_low_t *output);
void daala_idtx8(const tran_low_t *input, tran_low_t *output);
void daala_fdct16(const tran_low_t *input, tran_low_t *output);
void daala_idct16(const tran_low_t *input, tran_low_t *output);
void daala_fdst16(const tran_low_t *input, tran_low_t *output);
void daala_idst16(const tran_low_t *input, tran_low_t *output);
void daala_idtx16(const tran_low_t *input, tran_low_t *output);
void daala_fdct32(const tran_low_t *input, tran_low_t *output);
void daala_idct32(const tran_low_t *input, tran_low_t *output);
void daala_fdst32(const tran_low_t *input, tran_low_t *output);
void daala_idst32(const tran_low_t *input, tran_low_t *output);
void daala_idtx32(const tran_low_t *input, tran_low_t *output);
#if CONFIG_TX64X64
void daala_fdct64(const tran_low_t *input, tran_low_t *output);
void daala_idct64(const tran_low_t *input, tran_low_t *output);
void daala_fdst64(const tran_low_t *input, tran_low_t *output);
void daala_idst64(const tran_low_t *input, tran_low_t *output);
void daala_idtx64(const tran_low_t *input, tran_low_t *output);
#endif

// Daala-native strided kernels: forward kernels (od_bin_f*) read strided
// input x and write a length-N coefficient vector y; inverse kernels
// (od_bin_i*) scatter the vector y back into x with stride xstride.
// NOTE(review): no od_bin_fdst64/od_bin_idst64 are declared, so 64-point
// DST appears unsupported at this layer -- confirm against callers.
void od_bin_fdct4(od_coeff y[4], const od_coeff *x, int xstride);
void od_bin_idct4(od_coeff *x, int xstride, const od_coeff y[4]);
void od_bin_fdst4(od_coeff y[4], const od_coeff *x, int xstride);
void od_bin_idst4(od_coeff *x, int xstride, const od_coeff y[4]);
void od_bin_fidtx4(od_coeff y[4], const od_coeff *x, int xstride);
void od_bin_iidtx4(od_coeff *x, int xstride, const od_coeff y[4]);
void od_bin_fdct8(od_coeff y[8], const od_coeff *x, int xstride);
void od_bin_idct8(od_coeff *x, int xstride, const od_coeff y[8]);
void od_bin_fdst8(od_coeff y[8], const od_coeff *x, int xstride);
void od_bin_idst8(od_coeff *x, int xstride, const od_coeff y[8]);
void od_bin_fidtx8(od_coeff y[8], const od_coeff *x, int xstride);
void od_bin_iidtx8(od_coeff *x, int xstride, const od_coeff y[8]);
void od_bin_fdct16(od_coeff y[16], const od_coeff *x, int xstride);
void od_bin_idct16(od_coeff *x, int xstride, const od_coeff y[16]);
void od_bin_fdst16(od_coeff y[16], const od_coeff *x, int xstride);
void od_bin_idst16(od_coeff *x, int xstride, const od_coeff y[16]);
void od_bin_fidtx16(od_coeff y[16], const od_coeff *x, int xstride);
void od_bin_iidtx16(od_coeff *x, int xstride, const od_coeff y[16]);
void od_bin_fdct32(od_coeff y[32], const od_coeff *x, int xstride);
void od_bin_idct32(od_coeff *x, int xstride, const od_coeff y[32]);
void od_bin_fdst32(od_coeff y[32], const od_coeff *x, int xstride);
void od_bin_idst32(od_coeff *x, int xstride, const od_coeff y[32]);
void od_bin_fidtx32(od_coeff y[32], const od_coeff *x, int xstride);
void od_bin_iidtx32(od_coeff *x, int xstride, const od_coeff y[32]);
#if CONFIG_TX64X64
void od_bin_fdct64(od_coeff y[64], const od_coeff *x, int xstride);
void od_bin_idct64(od_coeff *x, int xstride, const od_coeff y[64]);
void od_bin_fidtx64(od_coeff y[64], const od_coeff *x, int xstride);
void od_bin_iidtx64(od_coeff *x, int xstride, const od_coeff y[64]);
#endif
#endif
This diff is collapsed.
This diff is collapsed.
......@@ -29,10 +29,8 @@ typedef struct {
transform_1d cols, rows; // vertical and horizontal
} transform_2d;
#if !CONFIG_DAALA_TX
#define MAX_TX_SCALE 1
int av1_get_tx_scale(const TX_SIZE tx_size);
#endif
void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param);
......
......@@ -410,8 +410,6 @@ static const int16_t ac_qlookup_12_Q3[QINDEX_RANGE] = {
28143, 28687, 29247,
};
#if !CONFIG_DAALA_TX
// Coefficient scaling and quantization with AV1 TX are tailored to
// the AV1 TX transforms. Regardless of the bit-depth of the input,
// the transform stages scale the coefficient values up by a factor of
......@@ -497,83 +495,6 @@ int16_t av1_qindex_from_ac_Q3(int ac_Q3, aom_bit_depth_t bit_depth) {
return QINDEX_RANGE - 1;
}
#else // CONFIG_DAALA_TX
// Daala TX uses a constant effective coefficient depth
// (TX_COEFF_DEPTH) regardless of input pixel bitdepth or transform
// size. This means that coefficient scale and range is identical
// regardless of the bit depth of the pixel input. However, the
// existing encoder heuristics and RDO loop were built expecting a
// quantizer that scales with bitdepth, treating it more as a
// proto-lambda than a quantizer. The assumption that quantizer scale
// increases with bitdepth is spread throughout the encoder.
// For this reason, we need to be able to find an old-style 'Q3'
// quantizer that scales with pixel depth (to be used in encoder
// decision making) as well as the literal quantizer that is used in
// actual quantization/dequantization. That is centralized here.
// Right now, the existing quantization code and setup are not
// particularly well suited to Daala TX. The scale range used by, eg,
// the 12 bit lookups is intentionally larger in order to provide more
// fine control at the top end of the quality range, as 12-bit input
// would be assumed to offer a lower noise floor than an 8-bit input.
// However, the 12-bit lookups assume an effective 15-bit TX depth,
// while we intend to run Daala TX somewhere between 12 and 14. We
// can't simply scale it down, because this would violate the minimum
// allowable quantizer in the current code (4).
// As such, we do the simplest thing for the time being: Always use
// the 8-bit scale range for all inputs and scale the QTX and Q3
// returns accordingly, which will always be no-ops or upshifts. This
// might well work well enough; if not, we'll need to patch quantizer
// scaling to extend the high-bitdepth quality range upward at some
// later date.
int16_t av1_dc_quant_Q3(int qindex, int delta, aom_bit_depth_t bit_depth) {
assert(bit_depth >= 8);
return qindex == 0 ? dc_qlookup_Q3[0]
: // Do not scale lossless
dc_qlookup_Q3[clamp(qindex + delta, 0, MAXQ)] *
(1 << (bit_depth - 8));
}
int16_t av1_ac_quant_Q3(int qindex, int delta, aom_bit_depth_t bit_depth) {
assert(bit_depth >= 8);
return qindex == 0 ? ac_qlookup_Q3[0]
: // Do not scale lossless
ac_qlookup_Q3[clamp(qindex + delta, 0, MAXQ)] *
(1 << (bit_depth - 8));
}
int16_t av1_dc_quant_QTX(int qindex, int delta, aom_bit_depth_t bit_depth) {
(void)bit_depth;
return qindex == 0 ? dc_qlookup_Q3[0]
: // Do not scale lossless
dc_qlookup_Q3[clamp(qindex + delta, 0, MAXQ)] *
(1 << (TX_COEFF_DEPTH - 11));
}
int16_t av1_ac_quant_QTX(int qindex, int delta, aom_bit_depth_t bit_depth) {
(void)bit_depth;
return qindex == 0 ? ac_qlookup_Q3[0]
: // Do not scale lossless
ac_qlookup_Q3[clamp(qindex + delta, 0, MAXQ)] *
(1 << (TX_COEFF_DEPTH - 11));
}
int16_t av1_qindex_from_ac_Q3(int ac_QTX, aom_bit_depth_t bit_depth) {
int i;
const int16_t *tab = ac_qlookup_Q3;
int scale = (1 << (TX_COEFF_DEPTH - 11));
(void)bit_depth;
for (i = 0; i < QINDEX_RANGE; i++) {
if (ac_QTX <= tab[i] * scale) return i;
}
return QINDEX_RANGE - 1;
}
#endif // !CONFIG_DAALA_TX
int av1_get_qindex(const struct segmentation *seg, int segment_id,
int base_qindex) {
if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
......
This diff is collapsed.
This diff is collapsed.
......@@ -82,19 +82,13 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *const xd,
struct macroblockd_plane *const pd = &xd->plane[plane];
const int16_t *const dequant = pd->seg_dequant_QTX[mbmi->segment_id];
tran_low_t *const tcoeffs = pd->dqcoeff;
#if !CONFIG_DAALA_TX
const int shift = av1_get_tx_scale(tx_size);
#endif
#if CONFIG_NEW_QUANT
#if !CONFIG_AOM_QM
const tran_low_t *dqv_val = &dq_val[0][0];
#endif // !CONFIG_AOM_QM
#if CONFIG_DAALA_TX
const int nq_shift = 0;
#else
const int nq_shift = shift;
#endif // CONFIG_DAALA_TX
#endif // CONFIG_NEW_QUANT && !CONFIG_AOM_QM
const int bwl = get_txb_bwl(tx_size);
const int width = get_txb_wide(tx_size);
......@@ -283,9 +277,7 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_AOM_QM
#else
v = level * dequant[!!c];
#if !CONFIG_DAALA_TX
v = v >> shift;
#endif // !CONFIG_DAALA_TX
#endif // CONFIG_NEW_QUANT
tcoeffs[pos] = v;
} else {
......@@ -351,9 +343,7 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_AOM_QM
#else
t = *level * dequant[!!pos];
#if !CONFIG_DAALA_TX
t = t >> shift;
#endif // !CONFIG_DAALA_TX
#endif // CONFIG_NEW_QUANT
if (signs[pos]) t = -t;
tcoeffs[pos] = clamp(t, min_value, max_value);
......@@ -374,9 +364,7 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_AOM_QM
#else
t = t * dequant[!!pos];
#if !CONFIG_DAALA_TX
t = t >> shift;
#endif // !CONFIG_DAALA_TX
#endif // CONFIG_NEW_QUANT
if (signs[pos]) t = -t;
tcoeffs[pos] = clamp(t, min_value, max_value);
......
......@@ -106,16 +106,10 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
const tran_low_t *dqv_val = &dq_val[0][0];
#endif // CONFIG_NEW_QUANT && !CONFIG_AOM_QM
#if !CONFIG_DAALA_TX
int dq_shift = av1_get_tx_scale(tx_size);
#endif
#if CONFIG_NEW_QUANT
#if CONFIG_DAALA_TX
int nq_shift = 0;
#else
int nq_shift = dq_shift;
#endif // CONFIG_DAALA_TX
#endif // CONFIG_NEW_QUANT
band = *band_translate++;
......@@ -192,11 +186,7 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val, nq_shift);
#endif // CONFIG_AOM_QM
#else
#if !CONFIG_DAALA_TX
v = (int)(((int64_t)val * dqv) >> dq_shift);
#else
v = val * dqv;
#endif
#endif
v = (int)check_range(av1_read_record_bit(xd->counts, r, ACCT_STR) ? -v : v,
......
/*
* Copyright (c) 2017, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can