Commit b8a6fd6b authored by Hui Su

DPCM intra coding experiment

Encode a block line by line, either horizontally or vertically. In the
vertical mode, each row is predicted by the reconstructed row above;
in the horizontal mode, each column is predicted by the reconstructed
column to the left.

The DPCM modes are enabled automatically for blocks that use a horizontal
or vertical prediction mode together with a 1D transform type (ext-tx).
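
For illustration, a minimal self-contained sketch of the vertical mode's
reconstruction order (plain C; the function and names are illustrative,
not this patch's API):

    #include <string.h>

    /* Vertical DPCM sketch: dst starts out holding the intra prediction
     * for the first row; each later row is predicted from the
     * reconstructed row above it, so rows must be reconstructed strictly
     * top to bottom. */
    static void dpcm_vert_sketch(unsigned char *dst, int stride, int w,
                                 int h, const int *res /* w*h residuals */) {
      for (int r = 0; r < h; ++r) {
        if (r > 0) memcpy(dst, dst - stride, w); /* predictor = row above */
        for (int c = 0; c < w; ++c) {            /* add residual, clamp   */
          const int v = dst[c] + res[r * w + c];
          dst[c] = (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
        dst += stride;
      }
    }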

Change-Id: I133ab6b537fa24a6e314ee1ef1d2fe9bd9d56c13
parent 90ed98f9
@@ -399,6 +399,13 @@ if (aom_config("CONFIG_HIGHBITDEPTH") ne "yes") {
add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";

if (aom_config("CONFIG_DPCM_INTRA") eq "yes") {
  @sizes = (4, 8, 16, 32);
  foreach $size (@sizes) {
    add_proto "void", "av1_dpcm_ft$size", "const int16_t *input, int stride, TX_TYPE_1D tx_type, tran_low_t *output";
  }
}

if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
  #fwd txfm
  add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
@@ -2917,3 +2917,157 @@ void av1_highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
  }
}
#endif  // CONFIG_HIGHBITDEPTH

#if CONFIG_DPCM_INTRA
void av1_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  // Indexed by TX_TYPE_1D: DCT_1D, ADST_1D, FLIPADST_1D, IDTX_1D.
  static const transform_1d IHT[] = { aom_idct4_c, aom_iadst4_c, aom_iadst4_c,
                                      iidtx4_c };
  const transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[4];
  inv_tx(input, out);
  for (int i = 0; i < 4; ++i) {
    // Apply the extra Sqrt2 correction that pairs with the pre-scaling
    // in av1_dpcm_ft4_c before the final rounding shift.
    out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4));
  }
}

void av1_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d IHT[] = { aom_idct8_c, aom_iadst8_c, aom_iadst8_c,
                                      iidtx8_c };
  const transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[8];
  inv_tx(input, out);
  for (int i = 0; i < 8; ++i) {
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4));
  }
}

void av1_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d IHT[] = { aom_idct16_c, aom_iadst16_c,
                                      aom_iadst16_c, iidtx16_c };
  const transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[16];
  inv_tx(input, out);
  for (int i = 0; i < 16; ++i) {
    // Length 16 also pairs a Sqrt2 correction with a deeper (>>5) shift.
    out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(out[i], 5));
  }
}

void av1_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d IHT[] = { aom_idct32_c, ihalfright32_c,
                                      ihalfright32_c, iidtx32_c };
  const transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[32];
  inv_tx(input, out);
  for (int i = 0; i < 32; ++i) {
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4));
  }
}

dpcm_inv_txfm_add_func av1_get_dpcm_inv_txfm_add_func(int tx_length) {
  switch (tx_length) {
    case 4: return av1_dpcm_inv_txfm_add_4_c;
    case 8: return av1_dpcm_inv_txfm_add_8_c;
    case 16: return av1_dpcm_inv_txfm_add_16_c;
    case 32: return av1_dpcm_inv_txfm_add_32_c;
    // TODO(huisu): add support for TX_64X64.
    default: assert(0); return NULL;
  }
}
#if CONFIG_HIGHBITDEPTH
void av1_hbd_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
                                   TX_TYPE_1D tx_type, int bd,
                                   uint16_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  // Indexed by TX_TYPE_1D: DCT_1D, ADST_1D, FLIPADST_1D, IDTX_1D.
  static const highbd_transform_1d IHT[] = { aom_highbd_idct4_c,
                                             aom_highbd_iadst4_c,
                                             aom_highbd_iadst4_c,
                                             highbd_iidtx4_c };
  const highbd_transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[4];
  inv_tx(input, out, bd);
  for (int i = 0; i < 4; ++i) {
    // Apply the extra Sqrt2 correction that pairs with the forward
    // pre-scaling for this length.
    out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
    dest[i * stride] = highbd_clip_pixel_add(
        dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4), bd);
  }
}

void av1_hbd_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
                                   TX_TYPE_1D tx_type, int bd,
                                   uint16_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const highbd_transform_1d IHT[] = { aom_highbd_idct8_c,
                                             aom_highbd_iadst8_c,
                                             aom_highbd_iadst8_c,
                                             highbd_iidtx8_c };
  const highbd_transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[8];
  inv_tx(input, out, bd);
  for (int i = 0; i < 8; ++i) {
    dest[i * stride] = highbd_clip_pixel_add(
        dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4), bd);
  }
}

void av1_hbd_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
                                    TX_TYPE_1D tx_type, int bd,
                                    uint16_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const highbd_transform_1d IHT[] = { aom_highbd_idct16_c,
                                             aom_highbd_iadst16_c,
                                             aom_highbd_iadst16_c,
                                             highbd_iidtx16_c };
  const highbd_transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[16];
  inv_tx(input, out, bd);
  for (int i = 0; i < 16; ++i) {
    // Length 16 also pairs a Sqrt2 correction with a deeper (>>5) shift.
    out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
    dest[i * stride] = highbd_clip_pixel_add(
        dest[i * stride], ROUND_POWER_OF_TWO(out[i], 5), bd);
  }
}

void av1_hbd_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
                                    TX_TYPE_1D tx_type, int bd,
                                    uint16_t *dest) {
  assert(tx_type < TX_TYPES_1D);
  static const highbd_transform_1d IHT[] = { aom_highbd_idct32_c,
                                             highbd_ihalfright32_c,
                                             highbd_ihalfright32_c,
                                             highbd_iidtx32_c };
  const highbd_transform_1d inv_tx = IHT[tx_type];
  tran_low_t out[32];
  inv_tx(input, out, bd);
  for (int i = 0; i < 32; ++i) {
    dest[i * stride] = highbd_clip_pixel_add(
        dest[i * stride], ROUND_POWER_OF_TWO(out[i], 4), bd);
  }
}

hbd_dpcm_inv_txfm_add_func av1_get_hbd_dpcm_inv_txfm_add_func(int tx_length) {
  switch (tx_length) {
    case 4: return av1_hbd_dpcm_inv_txfm_add_4_c;
    case 8: return av1_hbd_dpcm_inv_txfm_add_8_c;
    case 16: return av1_hbd_dpcm_inv_txfm_add_16_c;
    case 32: return av1_hbd_dpcm_inv_txfm_add_32_c;
    // TODO(huisu): add support for TX_64X64.
    default: assert(0); return NULL;
  }
}
#endif  // CONFIG_HIGHBITDEPTH
#endif  // CONFIG_DPCM_INTRA
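
A hypothetical caller-side sketch (not from the patch) of how this dispatch
is meant to be used: fetch the function for the 1-D length, then apply it
per line; a stride of 1 lays the outputs out as one row:

    /* Sketch only: assumes the av1 headers above; IDTX_1D as the 1-D type. */
    static void dpcm_add_one_row(const tran_low_t *row_coeffs,
                                 uint8_t *row_dst) {
      const dpcm_inv_txfm_add_func inv = av1_get_dpcm_inv_txfm_add_func(8);
      inv(row_coeffs, /*stride=*/1, IDTX_1D, row_dst); /* 8 adjacent pixels */
    }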
@@ -84,6 +84,33 @@ void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
void av1_highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                             INV_TXFM_PARAM *inv_txfm_param);
#endif  // CONFIG_HIGHBITDEPTH

#if CONFIG_DPCM_INTRA
void av1_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest);
typedef void (*dpcm_inv_txfm_add_func)(const tran_low_t *input, int stride,
                                       TX_TYPE_1D tx_type, uint8_t *dest);
dpcm_inv_txfm_add_func av1_get_dpcm_inv_txfm_add_func(int tx_length);

#if CONFIG_HIGHBITDEPTH
void av1_hbd_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
                                   TX_TYPE_1D tx_type, int bd, uint16_t *dest);
void av1_hbd_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
                                   TX_TYPE_1D tx_type, int bd, uint16_t *dest);
void av1_hbd_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
                                    TX_TYPE_1D tx_type, int bd,
                                    uint16_t *dest);
void av1_hbd_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
                                    TX_TYPE_1D tx_type, int bd,
                                    uint16_t *dest);
typedef void (*hbd_dpcm_inv_txfm_add_func)(const tran_low_t *input, int stride,
                                           TX_TYPE_1D tx_type, int bd,
                                           uint16_t *dest);
hbd_dpcm_inv_txfm_add_func av1_get_hbd_dpcm_inv_txfm_add_func(int tx_length);
#endif  // CONFIG_HIGHBITDEPTH
#endif  // CONFIG_DPCM_INTRA

#ifdef __cplusplus
}  // extern "C"
#endif
@@ -19,6 +19,20 @@
extern "C" {
#endif
#if CONFIG_DPCM_INTRA
static INLINE int av1_use_dpcm_intra(int plane, PREDICTION_MODE mode,
TX_TYPE tx_type,
const MB_MODE_INFO *const mbmi) {
(void)mbmi;
(void)plane;
#if CONFIG_EXT_INTRA
if (mbmi->sb_type >= BLOCK_8X8 && mbmi->angle_delta[plane != 0]) return 0;
#endif // CONFIG_EXT_INTRA
return (mode == V_PRED && (tx_type == IDTX || tx_type == H_DCT)) ||
(mode == H_PRED && (tx_type == IDTX || tx_type == V_DCT));
}
#endif // CONFIG_DPCM_INTRA
void av1_init_intra_predictors(void);
void av1_predict_intra_block_facade(MACROBLOCKD *xd, int plane, int block_idx,
int blk_col, int blk_row, TX_SIZE tx_size);
@@ -518,6 +518,133 @@ static int get_block_idx(const MACROBLOCKD *xd, int plane, int row, int col) {
  return row * max_blocks_wide + col * txh_unit;
}

#if CONFIG_DPCM_INTRA
static void process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                    const tran_low_t *dqcoeff, uint8_t *dst,
                                    int dst_stride) {
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
  dpcm_inv_txfm_add_func inverse_tx =
      av1_get_dpcm_inv_txfm_add_func(tx1d_width);
  for (int r = 0; r < tx1d_height; ++r) {
    // Rows after the first are predicted from the reconstructed row above.
    if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * sizeof(dst[0]));
    inverse_tx(dqcoeff, 1, tx_type_1d, dst);
    dqcoeff += tx1d_width;
    dst += dst_stride;
  }
}

static void process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                    const tran_low_t *dqcoeff, uint8_t *dst,
                                    int dst_stride) {
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
  dpcm_inv_txfm_add_func inverse_tx =
      av1_get_dpcm_inv_txfm_add_func(tx1d_height);
  tran_low_t tx_buff[64];
  for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) {
    for (int r = 0; r < tx1d_height; ++r) {
      // Columns after the first are predicted from the reconstructed
      // column to the left; gather this column's coefficients meanwhile.
      if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1];
      tx_buff[r] = dqcoeff[r * tx1d_width];
    }
    inverse_tx(tx_buff, dst_stride, tx_type_1d, dst);
  }
}

#if CONFIG_HIGHBITDEPTH
static void hbd_process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                        int bd, const tran_low_t *dqcoeff,
                                        uint8_t *dst8, int dst_stride) {
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
  hbd_dpcm_inv_txfm_add_func inverse_tx =
      av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_width);
  for (int r = 0; r < tx1d_height; ++r) {
    if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * sizeof(dst[0]));
    inverse_tx(dqcoeff, 1, tx_type_1d, bd, dst);
    dqcoeff += tx1d_width;
    dst += dst_stride;
  }
}

static void hbd_process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                        int bd, const tran_low_t *dqcoeff,
                                        uint8_t *dst8, int dst_stride) {
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
  hbd_dpcm_inv_txfm_add_func inverse_tx =
      av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_height);
  tran_low_t tx_buff[64];
  for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) {
    for (int r = 0; r < tx1d_height; ++r) {
      if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1];
      tx_buff[r] = dqcoeff[r * tx1d_width];
    }
    inverse_tx(tx_buff, dst_stride, tx_type_1d, bd, dst);
  }
}
#endif  // CONFIG_HIGHBITDEPTH

static void inverse_transform_block_dpcm(MACROBLOCKD *xd, int plane,
                                         PREDICTION_MODE mode, TX_SIZE tx_size,
                                         TX_TYPE tx_type, uint8_t *dst,
                                         int dst_stride, int16_t scan_line) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = pd->dqcoeff;
  TX_TYPE_1D tx_type_1d = DCT_1D;
  switch (tx_type) {
    case IDTX: tx_type_1d = IDTX_1D; break;
    case V_DCT:
      assert(mode == H_PRED);
      tx_type_1d = DCT_1D;
      break;
    case H_DCT:
      assert(mode == V_PRED);
      tx_type_1d = DCT_1D;
      break;
    default: assert(0);
  }
  switch (mode) {
    case V_PRED:
#if CONFIG_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        hbd_process_block_dpcm_vert(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
                                    dst_stride);
      } else {
#endif  // CONFIG_HIGHBITDEPTH
        process_block_dpcm_vert(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
#if CONFIG_HIGHBITDEPTH
      }
#endif  // CONFIG_HIGHBITDEPTH
      break;
    case H_PRED:
#if CONFIG_HIGHBITDEPTH
      if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        hbd_process_block_dpcm_horz(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
                                    dst_stride);
      } else {
#endif  // CONFIG_HIGHBITDEPTH
        process_block_dpcm_horz(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
#if CONFIG_HIGHBITDEPTH
      }
#endif  // CONFIG_HIGHBITDEPTH
      break;
    default: assert(0);
  }
  memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}
#endif  // CONFIG_DPCM_INTRA
static void predict_and_reconstruct_intra_block(
    AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r,
    MB_MODE_INFO *const mbmi, int plane, int row, int col, TX_SIZE tx_size) {
@@ -549,8 +676,22 @@ static void predict_and_reconstruct_intra_block(
  if (eob) {
    uint8_t *dst =
        &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
    inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
                            max_scan_line, eob);
#if CONFIG_DPCM_INTRA
    const int block_raster_idx =
        av1_block_index_to_raster_order(tx_size, block_idx);
    const PREDICTION_MODE mode = (plane == 0)
                                     ? get_y_mode(xd->mi[0], block_raster_idx)
                                     : mbmi->uv_mode;
    if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) {
      inverse_transform_block_dpcm(xd, plane, mode, tx_size, tx_type, dst,
                                   pd->dst.stride, max_scan_line);
    } else {
#endif  // CONFIG_DPCM_INTRA
      inverse_transform_block(xd, plane, tx_type, tx_size, dst,
                              pd->dst.stride, max_scan_line, eob);
#if CONFIG_DPCM_INTRA
    }
#endif  // CONFIG_DPCM_INTRA
  }
#else
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size);
@@ -2227,4 +2227,49 @@ void av1_highbd_fht64x64_c(const int16_t *input, tran_low_t *output, int stride,
  }
#endif  // CONFIG_TX64X64
#endif  // CONFIG_HIGHBITDEPTH

#if CONFIG_DPCM_INTRA
void av1_dpcm_ft4_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                    tran_low_t *output) {
  assert(tx_type < TX_TYPES_1D);
  // Indexed by TX_TYPE_1D: DCT_1D, ADST_1D, FLIPADST_1D, IDTX_1D.
  static const transform_1d FHT[] = { fdct4, fadst4, fadst4, fidtx4 };
  const transform_1d ft = FHT[tx_type];
  tran_low_t temp_in[4];
  // Pre-scale the input by 4 * Sqrt2 (fixed point, with rounding shift);
  // av1_dpcm_inv_txfm_add_4_c applies the matching Sqrt2 correction.
  for (int i = 0; i < 4; ++i)
    temp_in[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 4 * Sqrt2);
  ft(temp_in, output);
}

void av1_dpcm_ft8_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                    tran_low_t *output) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d FHT[] = { fdct8, fadst8, fadst8, fidtx8 };
  const transform_1d ft = FHT[tx_type];
  tran_low_t temp_in[8];
  // Pre-scale by 4; this length needs no Sqrt2 correction on either side.
  for (int i = 0; i < 8; ++i) temp_in[i] = input[i * stride] * 4;
  ft(temp_in, output);
}

void av1_dpcm_ft16_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                     tran_low_t *output) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d FHT[] = { fdct16, fadst16, fadst16, fidtx16 };
  const transform_1d ft = FHT[tx_type];
  tran_low_t temp_in[16];
  // Pre-scale by 2 * Sqrt2; the inverse pairs this with its extra Sqrt2
  // and a deeper (>>5) final shift.
  for (int i = 0; i < 16; ++i)
    temp_in[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 2 * Sqrt2);
  ft(temp_in, output);
}

void av1_dpcm_ft32_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                     tran_low_t *output) {
  assert(tx_type < TX_TYPES_1D);
  static const transform_1d FHT[] = { fdct32, fhalfright32, fhalfright32,
                                      fidtx32 };
  const transform_1d ft = FHT[tx_type];
  tran_low_t temp_in[32];
  // No pre-scaling for length 32.
  for (int i = 0; i < 32; ++i) temp_in[i] = input[i * stride];
  ft(temp_in, output);
}
#endif  // CONFIG_DPCM_INTRA
#endif  // !AV1_DCT_GTEST
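
A toy round-trip check of the scale bookkeeping for the length-8 identity
path, under the assumption (my reading of fidtx8/iidtx8, treat as such) that
each identity kernel doubles its input, so 4 * 2 * 2 / 16 == 1:

    #include <assert.h>

    int main(void) {
      for (int x = -255; x <= 255; ++x) {
        const int coeff = (x * 4) * 2;          /* ft8 pre-scale, fidtx8   */
        const int recon = (coeff * 2 + 8) >> 4; /* iidtx8, round-shift >>4 */
        assert(recon == x);                     /* residual recovered      */
      }
      return 0;
    }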
@@ -93,6 +93,15 @@ void av1_predict_intra_block_encoder_facade(MACROBLOCK *x,
                                            BLOCK_SIZE plane_bsize);
#endif

#if CONFIG_DPCM_INTRA
void av1_encode_block_intra_dpcm(const AV1_COMMON *cm, MACROBLOCK *x,
                                 PREDICTION_MODE mode, int plane, int block,
                                 int blk_row, int blk_col,
                                 BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                                 TX_TYPE tx_type, ENTROPY_CONTEXT *ta,
                                 ENTROPY_CONTEXT *tl, int8_t *skip);
#endif  // CONFIG_DPCM_INTRA

#ifdef __cplusplus
}  // extern "C"
#endif
@@ -1525,6 +1525,23 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
#else
    av1_predict_intra_block_facade(xd, plane, block, blk_col, blk_row,
                                   tx_size);
#endif

#if CONFIG_DPCM_INTRA
    const int block_raster_idx =
        av1_block_index_to_raster_order(tx_size, block);
    const PREDICTION_MODE mode =
        (plane == 0) ? get_y_mode(xd->mi[0], block_raster_idx) : mbmi->uv_mode;
    TX_TYPE tx_type = get_tx_type((plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV,
                                  xd, block, tx_size);
    if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) {
      int8_t skip;
      av1_encode_block_intra_dpcm(cm, x, mode, plane, block, blk_row, blk_col,
                                  plane_bsize, tx_size, tx_type, a, l, &skip);
      av1_dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
                     tx_size, &this_rd_stats.dist, &this_rd_stats.sse,
                     OUTPUT_HAS_DECODED_PIXELS);
      goto CALCULATE_RD;
    }
#endif  // CONFIG_DPCM_INTRA
    av1_subtract_txb(x, plane, plane_bsize, blk_col, blk_row, tx_size);
  }
@@ -1556,6 +1573,9 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
    cfl_store(xd->cfl, dst, dst_stride, blk_row, blk_col, tx_size);
  }
#endif
#if CONFIG_DPCM_INTRA
CALCULATE_RD : {}
#endif  // CONFIG_DPCM_INTRA
  rd = RDCOST(x->rdmult, x->rddiv, 0, this_rd_stats.dist);
  if (args->this_rd + rd > args->best_rd) {
    args->exit_early = 1;
@@ -250,6 +250,7 @@ EXPERIMENT_LIST="
    convolve_round
    compound_round
    ext_tx
    dpcm_intra
    tx64x64
    ext_intra
    intra_interp
@@ -516,6 +517,7 @@ post_process_cmdline() {
  enabled smooth_hv && soft_enable alt_intra
  enabled intra_edge && enable_feature ext_intra
  enabled chroma_2x2 && disable_feature chroma_sub8x8
  enabled dpcm_intra && enable_feature ext_tx
  if ! enabled daala_ec && ! enabled ans && enabled cfl; then
    log_echo "cfl requires daala_ec or ans, so disabling cfl"
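
With this legacy configure-based build, the experiment would then typically
be enabled via --enable-experimental plus --enable-dpcm_intra (flag naming
assumed from the experiment list above); the post_process_cmdline hook makes
dpcm_intra pull in ext_tx automatically.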