Commit 2283d370
Authored Oct 02, 2017 by Urvang Joshi

Use TX_TYPE consistently instead of int.

Change-Id: Idf01b14bed4701ce84fa1c127e01560f4764fadb
Parent: 40030325
Showing 40 changed files with 993 additions and 788 deletions.
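The change itself is mechanical: every transform entry point that used to take the transform type as a bare int now takes the TX_TYPE enum, and TxfmParam's tx_size likewise becomes TX_SIZE. As a hedged aside (not part of the commit, and using made-up function names rather than the real aom prototypes), a minimal self-contained C sketch of the before/after signature pattern:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the TX_TYPE enum from av1/common/enums.h;
 * the real enum has more members. */
typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;

/* Before: the transform type rides in an untyped int. */
static void fwd_txfm_old(const int16_t *input, int32_t *output, int stride,
                         int tx_type, int bd) {
  (void)input; (void)output; (void)stride; (void)bd;
  printf("old-style call: tx_type=%d\n", tx_type);
}

/* After: the prototype documents the legal values and lets the compiler
 * and static analyzers type-check call sites. */
static void fwd_txfm_new(const int16_t *input, int32_t *output, int stride,
                         TX_TYPE tx_type, int bd) {
  (void)input; (void)output; (void)stride; (void)bd;
  printf("new-style call: tx_type=%d\n", (int)tx_type);
}

int main(void) {
  int16_t in[16] = { 0 };
  int32_t out[16] = { 0 };
  fwd_txfm_old(in, out, 4, 2, 8);        /* what does 2 mean here? */
  fwd_txfm_new(in, out, 4, DCT_ADST, 8); /* intent is explicit */
  return 0;
}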
aom_dsp/txfm_common.h  +3 -2
av1/common/arm/neon/iht4x4_add_neon.c  +5 -5
av1/common/arm/neon/iht8x8_add_neon.c  +5 -5
av1/common/av1_fwd_txfm2d.c  +24 -24
av1/common/av1_inv_txfm2d.c  +22 -22
av1/common/av1_rtcd_defs.pl  +27 -27
av1/common/av1_txfm.h  +7 -7
av1/common/idct.c  +26 -26
av1/common/mips/msa/av1_idct16x16_msa.c  +1 -1
av1/common/mips/msa/av1_idct4x4_msa.c  +1 -1
av1/common/mips/msa/av1_idct8x8_msa.c  +1 -1
av1/common/x86/av1_fwd_txfm2d_sse4.c  +1 -1
av1/common/x86/highbd_inv_txfm_avx2.c  +1 -1
av1/common/x86/highbd_inv_txfm_sse4.c  +3 -3
av1/common/x86/hybrid_inv_txfm_avx2.c  +1 -1
av1/common/x86/idct_intrin_sse2.c  +9 -9
av1/encoder/dct.c  +19 -19
av1/encoder/hybrid_fwd_txfm.c  +7 -7
av1/encoder/x86/dct_intrin_sse2.c  +10 -10
av1/encoder/x86/highbd_fwd_txfm_sse4.c  +3 -3
av1/encoder/x86/hybrid_fwd_txfm_avx2.c  +2 -2
test/av1_fht16x16_test.cc  +78 -78
test/av1_fht16x32_test.cc  +55 -44
test/av1_fht16x8_test.cc  +57 -33
test/av1_fht32x16_test.cc  +55 -44
test/av1_fht32x32_test.cc  +48 -38
test/av1_fht4x4_test.cc  +52 -41
test/av1_fht4x8_test.cc  +47 -33
test/av1_fht64x64_test.cc  +32 -17
test/av1_fht8x16_test.cc  +57 -33
test/av1_fht8x4_test.cc  +47 -33
test/av1_fht8x8_test.cc  +52 -41
test/av1_fwd_txfm2d_test.cc  +2 -1
test/av1_highbd_iht_test.cc  +4 -4
test/av1_inv_txfm2d_test.cc  +2 -1
test/av1_txfm_test.h  +2 -2
test/dct16x16_test.cc  +58 -45
test/dct32x32_test.cc  +26 -16
test/fdct4x4_test.cc  +77 -58
test/fdct8x8_test.cc  +64 -49
aom_dsp/txfm_common.h

@@ -13,6 +13,7 @@
 #define AOM_DSP_TXFM_COMMON_H_
 
 #include "aom_dsp/aom_dsp_common.h"
+#include "av1/common/enums.h"
 
 // Constants and Macros used by all idct/dct functions
 #define DCT_CONST_BITS 14
@@ -23,8 +24,8 @@
 typedef struct txfm_param {
   // for both forward and inverse transforms
-  int tx_type;
-  int tx_size;
+  TX_TYPE tx_type;
+  TX_SIZE tx_size;
   int lossless;
   int bd;
 #if CONFIG_MRC_TX || CONFIG_LGT
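A hedged illustration of why the struct change matters (standalone sketch, not the aom sources; it assumes only that TX_TYPE is an enum whose members include DCT_DCT, ADST_DCT, DCT_ADST, and ADST_ADST): once tx_type is an enum field rather than an int, switches over it can be checked for missing cases by -Wswitch-style warnings.

#include <stdio.h>

/* Illustrative four-member subset of the real TX_TYPE enum. */
typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;

/* Mirrors the txfm_param change: an enum-typed field instead of int. */
typedef struct txfm_param {
  TX_TYPE tx_type; /* was: int tx_type; */
  int lossless;
  int bd;
} TxfmParam;

static const char *tx_type_name(TX_TYPE tx_type) {
  /* With an enum operand, compilers can warn if a member is unhandled;
   * with a plain int they cannot. */
  switch (tx_type) {
    case DCT_DCT: return "DCT_DCT";
    case ADST_DCT: return "ADST_DCT";
    case DCT_ADST: return "DCT_ADST";
    case ADST_ADST: return "ADST_ADST";
  }
  return "unknown";
}

int main(void) {
  TxfmParam param = { ADST_DCT, 0, 8 };
  printf("tx_type = %s, bd = %d\n", tx_type_name(param.tx_type), param.bd);
  return 0;
}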
av1/common/arm/neon/iht4x4_add_neon.c

@@ -148,13 +148,13 @@ void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
   TRANSPOSE4X4(&q8s16, &q9s16);
 
-  int tx_type = txfm_param->tx_type;
+  const TX_TYPE tx_type = txfm_param->tx_type;
   switch (tx_type) {
-    case 0:  // idct_idct is not supported. Fall back to C
+    case DCT_DCT:  // idct_idct is not supported. Fall back to C
       av1_iht4x4_16_add_c(input, dest, dest_stride, txfm_param);
       return;
       break;
-    case 1:  // iadst_idct
+    case ADST_DCT:  // iadst_idct
       // generate constants
       GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
       GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
@@ -168,7 +168,7 @@ void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
       // then transform columns
       IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16);
       break;
-    case 2:  // idct_iadst
+    case DCT_ADST:  // idct_iadst
       // generate constantsyy
       GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
       GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
@@ -182,7 +182,7 @@ void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
       // then transform columns
       IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16);
       break;
-    case 3:  // iadst_iadst
+    case ADST_ADST:  // iadst_iadst
       // generate constants
       GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16);
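In the NEON paths, the numeric case labels become enum names. Assuming the usual AV1 ordering of the first four TX_TYPE members (DCT_DCT = 0, ADST_DCT = 1, DCT_ADST = 2, ADST_ADST = 3), this is purely a readability change with no behavioral effect; a tiny standalone check of that mapping (a sketch, not the aom headers):

#include <assert.h>
#include <stdio.h>

/* Illustrative subset of TX_TYPE; the ordering of these four members is
 * what makes "case 0:" and "case DCT_DCT:" interchangeable. */
typedef enum { DCT_DCT, ADST_DCT, DCT_ADST, ADST_ADST } TX_TYPE;

int main(void) {
  assert(DCT_DCT == 0 && ADST_DCT == 1 && DCT_ADST == 2 && ADST_ADST == 3);
  printf("case labels 0..3 correspond to DCT_DCT..ADST_ADST\n");
  return 0;
}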
av1/common/arm/neon/iht8x8_add_neon.c

@@ -478,13 +478,13 @@ void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
   TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
                &q15s16);
 
-  int tx_type = txfm_param->tx_type;
+  const TX_TYPE tx_type = txfm_param->tx_type;
   switch (tx_type) {
-    case 0:  // idct_idct is not supported. Fall back to C
+    case DCT_DCT:  // idct_idct is not supported. Fall back to C
       av1_iht8x8_64_add_c(input, dest, dest_stride, txfm_param);
       return;
       break;
-    case 1:  // iadst_idct
+    case ADST_DCT:  // iadst_idct
       // generate IDCT constants
       // GENERATE_IDCT_CONSTANTS
@@ -503,7 +503,7 @@ void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
       IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
                   &q15s16);
       break;
-    case 2:  // idct_iadst
+    case DCT_ADST:  // idct_iadst
       // generate IADST constants
       // GENERATE_IADST_CONSTANTS
@@ -522,7 +522,7 @@ void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
       IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
                  &q15s16);
       break;
-    case 3:  // iadst_iadst
+    case ADST_ADST:  // iadst_iadst
       // generate IADST constants
       // GENERATE_IADST_CONSTANTS
av1/common/av1_fwd_txfm2d.c

@@ -143,13 +143,13 @@ static INLINE void fwd_txfm2d_c(const int16_t *input, int32_t *output,
 }
 
 void av1_fwd_txfm2d_4x8_c(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd) {
+                          TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int32_t txfm_buf[4 * 8];
   int16_t rinput[4 * 8];
   int tx_size = TX_4X8;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -166,20 +166,20 @@ void av1_fwd_txfm2d_4x8_c(const int16_t *input, int32_t *output, int stride,
 }
 
 void av1_fwd_txfm2d_8x4_c(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd) {
+                          TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[8 * 4];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X4);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_8x16_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+                           TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int32_t txfm_buf[8 * 16];
   int16_t rinput[8 * 16];
   int tx_size = TX_8X16;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -196,20 +196,20 @@ void av1_fwd_txfm2d_8x16_c(const int16_t *input, int32_t *output, int stride,
 }
 
 void av1_fwd_txfm2d_16x8_c(const int16_t *input, int32_t *output, int stride,
-                           int tx_type, int bd) {
+                           TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[16 * 8];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X8);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_16x32_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int32_t txfm_buf[16 * 32];
   int16_t rinput[16 * 32];
   int tx_size = TX_16X32;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -226,35 +226,35 @@ void av1_fwd_txfm2d_16x32_c(const int16_t *input, int32_t *output, int stride,
 }
 
 void av1_fwd_txfm2d_32x16_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[32 * 16];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X16);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_4x4_c(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd) {
+                          TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[4 * 4];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_4X4);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_8x8_c(const int16_t *input, int32_t *output, int stride,
-                          int tx_type, int bd) {
+                          TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[8 * 8];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_8X8);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_16x16_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[16 * 16];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_16X16);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[32 * 32];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_cfg(tx_type, TX_32X32);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
@@ -262,21 +262,21 @@ void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
 
 #if CONFIG_TX64X64
 void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[64 * 64];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x64_cfg(tx_type);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_32x64_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[32 * 64];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_32x64_cfg(tx_type);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
 }
 
 void av1_fwd_txfm2d_64x32_c(const int16_t *input, int32_t *output, int stride,
-                            int tx_type, int bd) {
+                            TX_TYPE tx_type, int bd) {
   int32_t txfm_buf[64 * 32];
   TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x32_cfg(tx_type);
   fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
@@ -349,11 +349,11 @@ static const TXFM_1D_CFG *fwd_txfm_row_cfg_ls[TX_TYPES_1D][TX_SIZES] = {
 #endif  // CONFIG_EXT_TX
 };
 
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(TX_TYPE tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
-  const int tx_type_col = vtx_tab[tx_type];
-  const int tx_type_row = htx_tab[tx_type];
+  const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
+  const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
   const int tx_size_col = txsize_vert_map[tx_size];
   const int tx_size_row = txsize_horz_map[tx_size];
   cfg.col_cfg = fwd_txfm_col_cfg_ls[tx_type_col][tx_size_col];
@@ -362,9 +362,9 @@ TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
 }
 
 #if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(TX_TYPE tx_type) {
   TXFM_2D_FLIP_CFG cfg;
-  const int tx_type_row = htx_tab[tx_type];
+  const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
   const int tx_size_row = txsize_horz_map[TX_32X64];
   switch (tx_type) {
     case DCT_DCT:
@@ -378,9 +378,9 @@ TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type) {
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(TX_TYPE tx_type) {
   TXFM_2D_FLIP_CFG cfg;
-  const int tx_type_col = vtx_tab[tx_type];
+  const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
   const int tx_size_col = txsize_vert_map[TX_64X32];
   switch (tx_type) {
     case DCT_DCT:
@@ -394,7 +394,7 @@ TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type) {
   return cfg;
 }
 
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(TX_TYPE tx_type) {
   TXFM_2D_FLIP_CFG cfg;
   switch (tx_type) {
     case DCT_DCT:
av1/common/av1_inv_txfm2d.c

@@ -140,11 +140,11 @@ static const TXFM_1D_CFG *inv_txfm_row_cfg_ls[TX_TYPES_1D][TX_SIZES] = {
 #endif  // CONFIG_EXT_TX
 };
 
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(TX_TYPE tx_type, int tx_size) {
   TXFM_2D_FLIP_CFG cfg;
   set_flip_cfg(tx_type, &cfg);
-  const int tx_type_col = vtx_tab[tx_type];
-  const int tx_type_row = htx_tab[tx_type];
+  const TX_TYPE_1D tx_type_col = vtx_tab[tx_type];
+  const TX_TYPE_1D tx_type_row = htx_tab[tx_type];
   const int tx_size_col = txsize_vert_map[tx_size];
   const int tx_size_row = txsize_horz_map[tx_size];
   cfg.col_cfg = inv_txfm_col_cfg_ls[tx_type_col][tx_size_col];
@@ -153,7 +153,7 @@ TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size) {
 }
 
 #if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(int tx_type) {
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_64x64_cfg(TX_TYPE tx_type) {
   TXFM_2D_FLIP_CFG cfg = { 0, 0, NULL, NULL };
   switch (tx_type) {
     case DCT_DCT:
@@ -294,7 +294,7 @@ static INLINE void inv_txfm2d_add_c(const int32_t *input, uint16_t *output,
 static INLINE void inv_txfm2d_add_facade(const int32_t *input,
                                          uint16_t *output, int stride,
                                          int32_t *txfm_buf,
-                                         int tx_type, int tx_size, int bd) {
+                                         TX_TYPE tx_type, int tx_size, int bd) {
   TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_cfg(tx_type, tx_size);
   int tx_size_sqr = txsize_sqr_map[tx_size];
   inv_txfm2d_add_c(input, output, stride, &cfg, txfm_buf,
@@ -302,20 +302,20 @@ static INLINE void inv_txfm2d_add_facade(const int32_t *input, uint16_t *output,
 }
 
 void av1_inv_txfm2d_add_4x8_c(const int32_t *input, uint16_t *output,
-                              int stride, int tx_type, int bd) {
+                              int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[4 * 8 + 8 + 8];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X8, bd);
 }
 
 void av1_inv_txfm2d_add_8x4_c(const int32_t *input, uint16_t *output,
-                              int stride, int tx_type, int bd) {
+                              int stride, TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int txfm_buf[8 * 4 + 8 + 8];
   int32_t rinput[8 * 4];
   uint16_t routput[8 * 4];
   int tx_size = TX_8X4;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -331,20 +331,20 @@ void av1_inv_txfm2d_add_8x4_c(const int32_t *input, uint16_t *output,
 }
 
 void av1_inv_txfm2d_add_8x16_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+                               int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[8 * 16 + 16 + 16];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X16, bd);
 }
 
 void av1_inv_txfm2d_add_16x8_c(const int32_t *input, uint16_t *output,
-                               int stride, int tx_type, int bd) {
+                               int stride, TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int txfm_buf[16 * 8 + 16 + 16];
   int32_t rinput[16 * 8];
   uint16_t routput[16 * 8];
   int tx_size = TX_16X8;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -360,20 +360,20 @@ void av1_inv_txfm2d_add_16x8_c(const int32_t *input, uint16_t *output,
 }
 
 void av1_inv_txfm2d_add_16x32_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[16 * 32 + 32 + 32];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X32, bd);
 }
 
 void av1_inv_txfm2d_add_32x16_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int txfm_buf[32 * 16 + 32 + 32];
   int32_t rinput[32 * 16];
   uint16_t routput[32 * 16];
   int tx_size = TX_32X16;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -389,45 +389,45 @@ void av1_inv_txfm2d_add_32x16_c(const int32_t *input, uint16_t *output,
 }
 
 void av1_inv_txfm2d_add_4x4_c(const int32_t *input, uint16_t *output,
-                              int stride, int tx_type, int bd) {
+                              int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[4 * 4 + 4 + 4];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_4X4, bd);
 }
 
 void av1_inv_txfm2d_add_8x8_c(const int32_t *input, uint16_t *output,
-                              int stride, int tx_type, int bd) {
+                              int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[8 * 8 + 8 + 8];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_8X8, bd);
 }
 
 void av1_inv_txfm2d_add_16x16_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[16 * 16 + 16 + 16];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_16X16, bd);
 }
 
 void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[32 * 32 + 32 + 32];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X32, bd);
 }
 
 #if CONFIG_TX64X64
 void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[64 * 64 + 64 + 64];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_64X64, bd);
 }
 
 void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
 #if CONFIG_TXMG
   int txfm_buf[64 * 32 + 64 + 64];
   int32_t rinput[64 * 32];
   uint16_t routput[64 * 32];
   int tx_size = TX_64X32;
   int rtx_size = av1_rotate_tx_size(tx_size);
-  int rtx_type = av1_rotate_tx_type(tx_type);
+  TX_TYPE rtx_type = av1_rotate_tx_type(tx_type);
   int w = tx_size_wide[tx_size];
   int h = tx_size_high[tx_size];
   int rw = h;
@@ -443,7 +443,7 @@ void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output,
 }
 
 void av1_inv_txfm2d_add_32x64_c(const int32_t *input, uint16_t *output,
-                                int stride, int tx_type, int bd) {
+                                int stride, TX_TYPE tx_type, int bd) {
   int txfm_buf[64 * 32 + 64 + 64];
   inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X64, bd);
 }
av1/common/av1_rtcd_defs.pl

@@ -265,32 +265,32 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
 }
 
 #inv txfm
-add_proto qw/void av1_inv_txfm2d_add_4x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_8x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_8x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_16x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_16x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_32x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_4x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_32x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_4x4/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT4") ne "yes") {
   specialize qw/av1_inv_txfm2d_add_4x4 sse4_1/;
 }
-add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_8x8/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT8") ne "yes") {
   specialize qw/av1_inv_txfm2d_add_8x8 sse4_1/;
 }
-add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_16x16/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT16") ne "yes") {
   specialize qw/av1_inv_txfm2d_add_16x16 sse4_1/;
 }
-add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
   specialize qw/av1_inv_txfm2d_add_32x32 avx2/;
 }
 if (aom_config("CONFIG_TX64X64") eq "yes") {
-  add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
-  add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
+  add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+  add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
+  add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, TX_TYPE tx_type, int bd";
 }
 #
@@ -404,36 +404,36 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
   }
 }
 
-add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bsx, int bsy, int tx_type";
+add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bsx, int bsy, TX_TYPE tx_type";
 
 #fwd txfm
-add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_8x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_8x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_16x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_16x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_32x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_8x4/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_8x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_16x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_16x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_32x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT4") ne "yes") {
   specialize qw/av1_fwd_txfm2d_4x4 sse4_1/;
 }
-add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_8x8/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT8") ne "yes") {
   specialize qw/av1_fwd_txfm2d_8x8 sse4_1/;
 }
-add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_16x16/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT16") ne "yes") {
   specialize qw/av1_fwd_txfm2d_16x16 sse4_1/;
 }
-add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+add_proto qw/void av1_fwd_txfm2d_32x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
 if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
   specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
 }
 if (aom_config("CONFIG_TX64X64") eq "yes") {
-  add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
-  add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
+  add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+  add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
+  add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, TX_TYPE tx_type, int bd";
 }
 #
 # Motion search
av1/common/av1_txfm.h

@@ -154,7 +154,7 @@ typedef struct TXFM_2D_FLIP_CFG {
   const TXFM_1D_CFG *row_cfg;
 } TXFM_2D_FLIP_CFG;
 
-static INLINE void set_flip_cfg(int tx_type, TXFM_2D_FLIP_CFG *cfg) {
+static INLINE void set_flip_cfg(TX_TYPE tx_type, TXFM_2D_FLIP_CFG *cfg) {
   switch (tx_type) {
     case DCT_DCT:
     case ADST_DCT:
@@ -225,7 +225,7 @@ static INLINE int av1_rotate_tx_size(int tx_size) {
   }
 }
 
-static INLINE int av1_rotate_tx_type(int tx_type) {
+static INLINE TX_TYPE av1_rotate_tx_type(TX_TYPE tx_type) {
   switch (tx_type) {
     case DCT_DCT: return DCT_DCT;
     case ADST_DCT: return DCT_ADST;
@@ -354,13 +354,13 @@ void av1_gen_inv_stage_range(int8_t *stage_range_col, int8_t *stage_range_row,
                              const TXFM_2D_FLIP_CFG *cfg, int8_t fwd_shift,
                              int bd);
 
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(TX_TYPE tx_type, int tx_size);
 #if CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type);
-TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(TX_TYPE tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(TX_TYPE tx_type);
+TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(TX_TYPE tx_type);
 #endif  // CONFIG_TX64X64
-TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size);
+TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(TX_TYPE tx_type, int tx_size);
 #ifdef __cplusplus
 }
 #endif  // __cplusplus
av1/common/idct.c

@@ -122,7 +122,7 @@ static void ihalfright64_c(const tran_low_t *input, tran_low_t *output) {
 // Inverse identity transform and add.
 #if CONFIG_EXT_TX
 static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
-                           int bsx, int bsy, int tx_type) {
+                           int bsx, int bsy, TX_TYPE tx_type) {
   int r, c;
   const int pels = bsx * bsy;
   const int shift = 3 - ((pels > 256) + (pels > 1024));
@@ -145,7 +145,7 @@ static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 #if CONFIG_EXT_TX
 static void maybe_flip_strides(uint8_t **dst, int *dstride, tran_low_t **src,
-                               int *sstride, int tx_type, int sizey,
+                               int *sstride, TX_TYPE tx_type, int sizey,
                                int sizex) {
   // Note that the transpose of src will be added to dst. In order to LR
   // flip the addends (in dst coordinates), we UD flip the src. To UD flip
@@ -186,7 +186,7 @@ static void maybe_flip_strides(uint8_t **dst, int *dstride, tran_low_t **src,
 #if CONFIG_HIGHBITDEPTH
 #if CONFIG_EXT_TX && CONFIG_TX64X64
 static void highbd_inv_idtx_add_c(const tran_low_t *input, uint8_t *dest8,
-                                  int stride, int bsx, int bsy, int tx_type,
+                                  int stride, int bsx, int bsy, TX_TYPE tx_type,
                                   int bd) {
   int r, c;
   const int pels = bsx * bsy;
@@ -263,7 +263,7 @@ int get_lgt8(const TxfmParam *txfm_param, int is_col,
 void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
-  int tx_type = txfm_param->tx_type;
+  const TX_TYPE tx_type = txfm_param->tx_type;
 #if CONFIG_MRC_TX
   assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
 #endif  // CONFIG_MRC_TX
@@ -386,7 +386,7 @@ void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
-  int tx_type = txfm_param->tx_type;
+  const TX_TYPE tx_type = txfm_param->tx_type;
 #if CONFIG_MRC_TX
   assert(tx_type != MRC_DCT && "Invalid tx type for tx size");
 #endif  // CONFIG_MRC_TX
@@ -467,7 +467,7 @@ void av1_iht4x8_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 void av1_iht8x4_32_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
-  int tx_type = txfm_param->tx_type;
+  const TX_TYPE tx_type = txfm_param->tx_type;