Xiph.Org / Opus · Commits

Commit 1032e47d (Verified), authored 1 year ago by Jean-Marc Valin
more cleanup

Parent: 7f0d456c
Changes: 2 changed files, with 0 additions and 123 deletions

  dnn/nnet.h                   +0 −55
  dnn/parse_lpcnet_weights.c   +0 −68
dnn/nnet.h  +0 −55  (view file @ 1032e47d)
@@ -92,16 +92,6 @@ typedef struct {
  int activation;
} DenseLayer;

typedef struct {
  const float *bias;
  const float *input_weights;
  const float *factor;
  int nb_inputs;
  int nb_neurons;
  int nb_channels;
  int activation;
} MDenseLayer;

typedef struct {
  const float *bias;
  const float *subias;

@@ -114,17 +104,6 @@ typedef struct {
  int reset_after;
} GRULayer;

typedef struct {
  const float *bias;
  const float *subias;
  const float *diag_weights;
  const qweight *recurrent_weights;
  const int *idx;
  int nb_neurons;
  int activation;
  int reset_after;
} SparseGRULayer;

typedef struct {
  const float *bias;
  const float *input_weights;

@@ -151,8 +130,6 @@ void compute_activation(float *output, const float *input, int N, int activation
void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input);

void compute_mdense(const MDenseLayer *layer, float *output, const float *input);

void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input);

@@ -184,15 +161,6 @@ int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
    int ktime,
    int kheight);

int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *input_weights,
    const char *factor,
    int nb_inputs,
    int nb_neurons,
    int nb_channels,
    int activation);

int dense_init(DenseLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *input_weights,

@@ -211,30 +179,7 @@ int gru_init(GRULayer *layer, const WeightArray *arrays,
    int activation,
    int reset_after);

int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *subias,
    const char *diag_weights,
    const char *recurrent_weights,
    const char *idx,
    int nb_neurons,
    int activation,
    int reset_after);

int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *input_weights,
    int nb_inputs,
    int kernel_size,
    int nb_neurons,
    int activation);

void compute_conv2d(const Conv2dLayer *conv, float *out, float *mem, const float *in, int height, int hstride, int activation);

int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
    const char *embedding_weights,
    int nb_inputs,
    int dim);

#endif /* _MLP_H_ */
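The header declarations removed here follow an init-then-compute pattern: an *_init() call resolves named entries from a WeightArray into a layer struct, and a matching compute_*() call evaluates that layer. Below is a minimal sketch of how the (now removed) MDense API would have been driven, based only on the signatures shown above; the weight-array names, the layer sizes, and the ACTIVATION_TANH constant are illustrative assumptions, not taken from this commit.

/* Hypothetical caller for the removed MDense API (illustration only). */
#include "nnet.h"

static int run_mdense_example(const WeightArray *arrays, const float *features)
{
   MDenseLayer md;
   float out[16];
   /* mdense_init() looks up the named arrays and returns non-zero on failure.
      Arguments after the names: nb_inputs, nb_neurons, nb_channels, activation. */
   if (mdense_init(&md, arrays, "md_bias", "md_weights", "md_factor",
                   128, 16, 2, ACTIVATION_TANH))
      return 1;
   /* Evaluate the layer on a feature vector of nb_inputs floats. */
   compute_mdense(&md, out, features);
   return 0;
}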
dnn/parse_lpcnet_weights.c  +0 −68  (view file @ 1032e47d)
@@ -175,24 +175,6 @@ int linear_init(LinearLayer *layer, const WeightArray *arrays,
  return 0;
}

int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *input_weights,
    const char *factor,
    int nb_inputs,
    int nb_neurons,
    int nb_channels,
    int activation)
{
  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*nb_channels*sizeof(layer->bias[0]))) == NULL) return 1;
  if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_channels*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
  if ((layer->factor = find_array_check(arrays, factor, nb_channels*nb_neurons*sizeof(layer->factor[0]))) == NULL) return 1;
  layer->nb_inputs = nb_inputs;
  layer->nb_neurons = nb_neurons;
  layer->nb_channels = nb_channels;
  layer->activation = activation;
  return 0;
}

int dense_init(DenseLayer *layer, const WeightArray *arrays,
    const char *bias,

@@ -233,45 +215,6 @@ int gru_init(GRULayer *layer, const WeightArray *arrays,
  return 0;
}

int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *subias,
    const char *diag_weights,
    const char *recurrent_weights,
    const char *idx,
    int nb_neurons,
    int activation,
    int reset_after)
{
  int total_blocks;
  if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
  if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
  if ((layer->diag_weights = find_array_check(arrays, diag_weights, 3*nb_neurons*sizeof(layer->diag_weights[0]))) == NULL) return 1;
  if ((layer->idx = find_idx_check(arrays, idx, nb_neurons, 3*nb_neurons, &total_blocks)) == NULL) return 1;
  if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
  layer->nb_neurons = nb_neurons;
  layer->activation = activation;
  layer->reset_after = reset_after;
  return 0;
}

int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *input_weights,
    int nb_inputs,
    int kernel_size,
    int nb_neurons,
    int activation)
{
  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
  if ((layer->input_weights = find_array_check(arrays, input_weights, kernel_size*nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
  layer->nb_inputs = nb_inputs;
  layer->kernel_size = kernel_size;
  layer->nb_neurons = nb_neurons;
  layer->activation = activation;
  return 0;
}

int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
    const char *bias,
    const char *float_weights,

@@ -297,17 +240,6 @@ int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
  return 0;
}

int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
    const char *embedding_weights,
    int nb_inputs,
    int dim)
{
  if ((layer->embedding_weights = find_array_check(arrays, embedding_weights, nb_inputs*dim*sizeof(layer->embedding_weights[0]))) == NULL) return 1;
  layer->nb_inputs = nb_inputs;
  layer->dim = dim;
  return 0;
}

#if 0
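All of the removed *_init() implementations share one validation pattern: look up each named blob with find_array_check(), verify that its byte size matches what the layer geometry implies, and bail out with 1 if anything is missing or mis-sized. The toy sketch below illustrates that pattern; the WeightArray layout and the helper are hypothetical stand-ins, since the real find_array_check() and the real struct definition are not part of this diff.

/* Toy illustration of the name + size validation pattern (assumptions only). */
#include <stddef.h>
#include <string.h>

typedef struct {
   const char *name;   /* assumed: entry name used for lookup */
   int size;           /* assumed: payload size in bytes */
   const void *data;   /* assumed: pointer to the stored weights */
} ToyWeightArray;

/* Return the payload only when the name matches and the size is exactly the
   expected one, mirroring the nb_neurons*...*sizeof(layer->field[0]) checks
   in the removed init functions; NULL means "missing or mis-sized". */
static const void *toy_find_array_check(const ToyWeightArray *arrays,
                                        const char *name, int expected_size)
{
   int i;
   for (i = 0; arrays[i].name != NULL; i++) {
      if (strcmp(arrays[i].name, name) == 0)
         return arrays[i].size == expected_size ? arrays[i].data : NULL;
   }
   return NULL;
}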