Commit 32a63fd3, authored 3 years ago by Jean-Marc Valin

WIP: PLC prediction

parent c45963d4
Changes: 4 changed files, with 290 additions and 1 deletion

  dnn/Makefile.am               +2    -0
  dnn/lpcnet_plc.c              +21   -1
  dnn/lpcnet_private.h          +2    -0
  dnn/training_tf2/dump_plc.py  +265  -0
dnn/Makefile.am  (+2, -0)

@@ -16,6 +16,7 @@ noinst_HEADERS = arch.h \
 	lpcnet_private.h \
 	opus_types.h \
 	nnet_data.h \
+	plc_data.h \
 	nnet.h \
 	pitch.h \
 	tansig_table.h \
@@ -31,6 +32,7 @@ liblpcnet_la_SOURCES = \
 	lpcnet_enc.c \
 	nnet.c \
 	nnet_data.c \
+	plc_data.c \
 	ceps_codebooks.c \
 	pitch.c \
 	freq.c \
dnn/lpcnet_plc.c  (+21, -1)

@@ -30,9 +30,11 @@
 #include "lpcnet_private.h"
 #include "lpcnet.h"
+#include "plc_data.h"
 
 #define PLC_DUMP_FEATURES 0
 #define PLC_READ_FEATURES 0
+#define PLC_DNN_PRED 1
 
 LPCNET_EXPORT int lpcnet_plc_get_size() {
   return sizeof(LPCNetPLCState);
 }
@@ -58,6 +60,15 @@ LPCNET_EXPORT void lpcnet_plc_destroy(LPCNetPLCState *st) {
   free(st);
 }
 
+static void compute_plc_pred(PLCNetState *net, float *out, const float *in) {
+  float zeros[1024] = {0};
+  float dense_out[PLC_DENSE1_OUT_SIZE];
+  _lpcnet_compute_dense(&plc_dense1, dense_out, in);
+  compute_gruB(&plc_gru1, zeros, net->plc_gru1_state, dense_out);
+  compute_gruB(&plc_gru2, zeros, net->plc_gru2_state, net->plc_gru1_state);
+  if (out != NULL) _lpcnet_compute_dense(&plc_out, out, net->plc_gru2_state);
+}
+
 LPCNET_EXPORT int lpcnet_plc_update(LPCNetPLCState *st, short *pcm) {
   int i;
   float x[FRAME_SIZE];
@@ -99,6 +110,9 @@ LPCNET_EXPORT int lpcnet_plc_update(LPCNetPLCState *st, short *pcm) {
     for (i=0;i<FRAME_SIZE;i++) st->pcm[PLC_BUF_SIZE+i] = pcm[i];
     RNN_COPY(output, &st->pcm[0], FRAME_SIZE);
     lpcnet_synthesize_impl(&st->lpcnet, st->enc.features[0], output, FRAME_SIZE, FRAME_SIZE);
+#if PLC_DNN_PRED
+    compute_plc_pred(&st->plc_net, NULL, st->enc.features[0]);
+#endif
 #if PLC_READ_FEATURES
     for (i=0;i<NB_FEATURES;i++) scanf("%f", &st->features[i]);
 #endif
@@ -106,7 +120,6 @@ LPCNET_EXPORT int lpcnet_plc_update(LPCNetPLCState *st, short *pcm) {
     for (i=0;i<NB_FEATURES;i++) printf("%f ", st->enc.features[0][i]);
     printf("1\n");
 #endif
     RNN_MOVE(st->pcm, &st->pcm[FRAME_SIZE], PLC_BUF_SIZE);
   }
   RNN_COPY(st->features, st->enc.features[0], NB_TOTAL_FEATURES);
@@ -118,6 +131,7 @@ LPCNET_EXPORT int lpcnet_plc_conceal(LPCNetPLCState *st, short *pcm) {
   int i;
 #endif
   short output[FRAME_SIZE];
+  float zeros[NB_FEATURES+1] = {0};
   st->enc.pcount = 0;
   /* If we concealed the previous frame, finish synthesizing the rest of the samples. */
   /* FIXME: Copy/predict features. */
@@ -126,6 +140,9 @@ LPCNET_EXPORT int lpcnet_plc_conceal(LPCNetPLCState *st, short *pcm) {
     int update_count;
     update_count = IMIN(st->pcm_fill, FRAME_SIZE);
     RNN_COPY(output, &st->pcm[0], update_count);
+#if PLC_DNN_PRED
+    compute_plc_pred(&st->plc_net, st->features, zeros);
+#endif
 #if PLC_READ_FEATURES
     for (i=0;i<NB_FEATURES;i++) scanf("%f", &st->features[i]);
 #endif
@@ -139,6 +156,9 @@ LPCNET_EXPORT int lpcnet_plc_conceal(LPCNetPLCState *st, short *pcm) {
     st->skip_analysis++;
   }
   lpcnet_synthesize_tail_impl(&st->lpcnet, pcm, FRAME_SIZE-TRAINING_OFFSET, 0);
+#if PLC_DNN_PRED
+  compute_plc_pred(&st->plc_net, st->features, zeros);
+#endif
 #if PLC_READ_FEATURES
   for (i=0;i<NB_FEATURES;i++) scanf("%f", &st->features[i]);
 #endif
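Note: the new compute_plc_pred() chains the four dumped layers: plc_dense1 conditions the input features, two stacked GRUs carry the prediction state across frames (their states are updated in place, with a zeroed auxiliary buffer passed to compute_gruB()), and plc_out maps the second GRU state back to a feature vector when out != NULL. The model definition itself (lpcnet_plc.new_lpcnet_plc_model) is not part of this commit, so the Keras sketch below only mirrors the layer names and parameters that the dump_plc.py script added below relies on; the sizes, activations, and the lost-frame flag input are assumptions.

# Hypothetical sketch of the PLC feature-prediction model; only the layer
# names (plc_dense1, plc_gru1, plc_gru2, plc_out) and the rnn_units/cond_size
# parameters are taken from dump_plc.py. Everything else is a guess.
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GRU

def plc_model_sketch(nb_features=20, rnn_units=256, cond_size=128):
    # Previous-frame features plus a frame-lost flag: the C code above feeds
    # NB_FEATURES+1 zeros when concealing.
    feat = Input(shape=(None, nb_features + 1))
    x = Dense(cond_size, activation='tanh', name='plc_dense1')(feat)
    x = GRU(rnn_units, return_sequences=True, name='plc_gru1')(x)
    x = GRU(rnn_units, return_sequences=True, name='plc_gru2')(x)
    out = Dense(nb_features, name='plc_out')(x)   # predicted features
    return Model(feat, out)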
dnn/lpcnet_private.h  (+2, -0)

@@ -6,6 +6,7 @@
 #include "freq.h"
 #include "lpcnet.h"
 #include "nnet_data.h"
+#include "plc_data.h"
 #include "kiss99.h"
 
 #define BITS_PER_CHAR 8
@@ -74,6 +75,7 @@ struct LPCNetPLCState {
   int skip_analysis;
   int blend;
   float features[NB_TOTAL_FEATURES];
+  PLCNetState plc_net;
 };
 
 extern float ceps_codebook1[];
dnn/training_tf2/dump_plc.py  (new file mode 100755, +265, -0)
#!/usr/bin/python3
'''Copyright (c) 2021-2022 Amazon
   Copyright (c) 2017-2018 Mozilla

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''

import lpcnet_plc
import sys
import numpy as np
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Layer, GRU, Dense, Conv1D, Embedding
import h5py
import re

# Flag for dumping e2e (differentiable lpc) network weights
flag_e2e = False

max_rnn_neurons = 1
max_conv_inputs = 1
def printVector(f, vector, name, dtype='float', dotp=False):
    if dotp:
        vector = vector.reshape((vector.shape[0]//4, 4, vector.shape[1]//8, 8))
        vector = vector.transpose((2, 0, 3, 1))
    v = np.reshape(vector, (-1));
    #print('static const float ', name, '[', len(v), '] = \n', file=f)
    f.write('static const {} {}[{}] = {{\n   '.format(dtype, name, len(v)))
    for i in range(0, len(v)):
        f.write('{}'.format(v[i]))
        if (i!=len(v)-1):
            f.write(',')
        else:
            break;
        if (i%8==7):
            f.write("\n   ")
        else:
            f.write(" ")
    #print(v, file=f)
    f.write('\n};\n\n')
    return;
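For reference, a quick demonstration of what printVector() emits; `toy_weights` is an invented name, and the snippet assumes printVector and the numpy import from the script above:

import io
buf = io.StringIO()
printVector(buf, np.array([[1.0, 2.0], [3.0, 4.0]]), 'toy_weights')
print(buf.getvalue())
# static const float toy_weights[4] = {
#    1.0, 2.0, 3.0, 4.0
# };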
def printSparseVector(f, A, name, have_diag=True):
    N = A.shape[0]
    M = A.shape[1]
    W = np.zeros((0,), dtype='int')
    W0 = np.zeros((0,))
    if have_diag:
        diag = np.concatenate([np.diag(A[:,:N]), np.diag(A[:,N:2*N]), np.diag(A[:,2*N:])])
        A[:,:N] = A[:,:N] - np.diag(np.diag(A[:,:N]))
        A[:,N:2*N] = A[:,N:2*N] - np.diag(np.diag(A[:,N:2*N]))
        A[:,2*N:] = A[:,2*N:] - np.diag(np.diag(A[:,2*N:]))
        printVector(f, diag, name + '_diag')
    AQ = np.minimum(127, np.maximum(-128, np.round(A*128))).astype('int')
    idx = np.zeros((0,), dtype='int')
    for i in range(M//8):
        pos = idx.shape[0]
        idx = np.append(idx, -1)
        nb_nonzero = 0
        for j in range(N//4):
            block = A[j*4:(j+1)*4, i*8:(i+1)*8]
            qblock = AQ[j*4:(j+1)*4, i*8:(i+1)*8]
            if np.sum(np.abs(block)) > 1e-10:
                nb_nonzero = nb_nonzero + 1
                idx = np.append(idx, j*4)
                vblock = qblock.transpose((1,0)).reshape((-1,))
                W0 = np.concatenate([W0, block.reshape((-1,))])
                W = np.concatenate([W, vblock])
        idx[pos] = nb_nonzero
    f.write('#ifdef DOT_PROD\n')
    printVector(f, W, name, dtype='qweight')
    f.write('#else /*DOT_PROD*/\n')
    printVector(f, W0, name, dtype='qweight')
    f.write('#endif /*DOT_PROD*/\n')
    #idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
    printVector(f, idx, name + '_idx', dtype='int')
    return AQ
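The matrix is scanned in 4x8 blocks: blocks that are numerically zero are dropped, the surviving ones are additionally quantized to int8 (round(128*A), clipped to [-128, 127]) for the DOT_PROD build, and `idx` records, for each group of 8 output columns, the number of non-zero blocks followed by their row offsets. A toy run with invented sizes and a single non-zero block, assuming the definitions above:

import io
A = np.zeros((8, 24))
A[0:4, 0:8] = 0.5          # one non-zero 4x8 block
buf = io.StringIO()
AQ = printSparseVector(buf, A, 'toy_sparse', have_diag=False)
# idx comes out as [1, 0, 0, 0]: column group 0 keeps one block starting at
# row 0; column groups 1 and 2 keep none.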
def dump_layer_ignore(self, f, hf):
    print("ignoring layer " + self.name + " of type " + self.__class__.__name__)
    return False
Layer.dump_layer = dump_layer_ignore
def dump_sparse_gru(self, f, hf):
    global max_rnn_neurons
    name = 'sparse_' + self.name
    print("printing layer " + name + " of type sparse " + self.__class__.__name__)
    weights = self.get_weights()
    qweights = printSparseVector(f, weights[1], name + '_recurrent_weights')
    printVector(f, weights[-1], name + '_bias')
    subias = weights[-1].copy()
    subias[1,:] = subias[1,:] - np.sum(qweights*(1./128), axis=0)
    printVector(f, subias, name + '_subias')
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'
    if hasattr(self, 'reset_after') and not self.reset_after:
        reset_after = 0
    else:
        reset_after = 1
    neurons = weights[0].shape[1]//3
    max_rnn_neurons = max(max_rnn_neurons, neurons)
    f.write('const SparseGRULayer {} = {{\n   {}_bias,\n   {}_subias,\n   {}_recurrent_weights_diag,\n   {}_recurrent_weights,\n   {}_recurrent_weights_idx,\n   {}, ACTIVATION_{}, {}\n}};\n\n'
            .format(name, name, name, name, name, name, weights[0].shape[1]//3, activation, reset_after))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('extern const SparseGRULayer {};\n\n'.format(name));
    return True
def dump_gru_layer(self, f, hf):
    global max_rnn_neurons
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    qweight = printSparseVector(f, weights[0], name + '_weights', have_diag=False)
    f.write('#ifdef DOT_PROD\n')
    qweight2 = np.clip(np.round(128.*weights[1]).astype('int'), -128, 127)
    printVector(f, qweight2, name + '_recurrent_weights', dotp=True, dtype='qweight')
    f.write('#else /*DOT_PROD*/\n')
    printVector(f, weights[1], name + '_recurrent_weights')
    f.write('#endif /*DOT_PROD*/\n')
    printVector(f, weights[-1], name + '_bias')
    subias = weights[-1].copy()
    subias[0,:] = subias[0,:] - np.sum(qweight*(1./128.), axis=0)
    subias[1,:] = subias[1,:] - np.sum(qweight2*(1./128.), axis=0)
    printVector(f, subias, name + '_subias')
    if hasattr(self, 'activation'):
        activation = self.activation.__name__.upper()
    else:
        activation = 'TANH'
    if hasattr(self, 'reset_after') and not self.reset_after:
        reset_after = 0
    else:
        reset_after = 1
    neurons = weights[0].shape[1]//3
    max_rnn_neurons = max(max_rnn_neurons, neurons)
    f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_subias,\n   {}_weights,\n   {}_weights_idx,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}, {}\n}};\n\n'
            .format(name, name, name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation, reset_after))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('extern const GRULayer {};\n\n'.format(name));
    return True
GRU.dump_layer = dump_gru_layer
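Both the input and recurrent kernels get an int8 version (Q = clip(round(128*W), -128, 127)), and a corrected bias `subias` is dumped next to the plain one, one row per gate set: subias[g,:] = bias[g,:] - sum_i Q[i,:]/128. This is the same correction LPCNet's dump_lpcnet.py applies for its quantized kernels. A toy recomputation, with invented shapes and values:

import numpy as np
W = np.random.uniform(-1, 1, (6, 12))                     # toy kernel: input dim 6, 3 gates x 4 units
Q = np.clip(np.round(128.*W).astype('int'), -128, 127)    # int8 weights, as above
bias = np.zeros((2, 12))                                  # row 0: input bias, row 1: recurrent bias
subias = bias.copy()
subias[0,:] -= np.sum(Q*(1./128.), axis=0)                # matches dump_gru_layer()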
def dump_gru_layer_dummy(self, f, hf):
    name = self.name
    weights = self.get_weights()
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
    return True;
#GRU.dump_layer = dump_gru_layer_dummy
def dump_dense_layer_impl(name, weights, bias, activation, f, hf):
    printVector(f, weights, name + '_weights')
    printVector(f, bias, name + '_bias')
    f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, weights.shape[0], weights.shape[1], activation))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
    hf.write('extern const DenseLayer {};\n\n'.format(name));
def dump_dense_layer(self, f, hf):
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    activation = self.activation.__name__.upper()
    dump_dense_layer_impl(name, weights[0], weights[1], activation, f, hf)
    return False

Dense.dump_layer = dump_dense_layer
def dump_conv1d_layer(self, f, hf):
    global max_conv_inputs
    name = self.name
    print("printing layer " + name + " of type " + self.__class__.__name__)
    weights = self.get_weights()
    printVector(f, weights[0], name + '_weights')
    printVector(f, weights[-1], name + '_bias')
    activation = self.activation.__name__.upper()
    max_conv_inputs = max(max_conv_inputs, weights[0].shape[1]*weights[0].shape[0])
    f.write('const Conv1DLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
            .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
    hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[2]))
    hf.write('#define {}_STATE_SIZE ({}*{})\n'.format(name.upper(), weights[0].shape[1], (weights[0].shape[0]-1)))
    hf.write('#define {}_DELAY {}\n'.format(name.upper(), (weights[0].shape[0]-1)//2))
    hf.write('extern const Conv1DLayer {};\n\n'.format(name));
    return True
Conv1D.dump_layer = dump_conv1d_layer
filename = sys.argv[1]

with h5py.File(filename, "r") as f:
    units = min(f['model_weights']['plc_gru1']['plc_gru1']['recurrent_kernel:0'].shape)
    units2 = min(f['model_weights']['plc_gru2']['plc_gru2']['recurrent_kernel:0'].shape)
    cond_size = f['model_weights']['plc_dense1']['plc_dense1']['kernel:0'].shape[1]

model = lpcnet_plc.new_lpcnet_plc_model(rnn_units=units, cond_size=cond_size)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()

model.load_weights(filename, by_name=True)

if len(sys.argv) > 2:
    cfile = sys.argv[2];
    hfile = sys.argv[3];
else:
    cfile = 'plc_data.c'
    hfile = 'plc_data.h'

f = open(cfile, 'w')
hf = open(hfile, 'w')

f.write('/*This file is automatically generated from a Keras model*/\n')
f.write('/*based on model {}*/\n\n'.format(sys.argv[1]))
f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "nnet.h"\n#include "{}"\n\n'.format(hfile))

hf.write('/*This file is automatically generated from a Keras model*/\n\n')
hf.write('#ifndef PLC_DATA_H\n#define PLC_DATA_H\n\n#include "nnet.h"\n\n')

layer_list = []
for i, layer in enumerate(model.layers):
    if layer.dump_layer(f, hf):
        layer_list.append(layer.name)

#dump_sparse_gru(model.get_layer('gru_a'), f, hf)

hf.write('#define PLC_MAX_RNN_NEURONS {}\n\n'.format(max_rnn_neurons))
#hf.write('#define PLC_MAX_CONV_INPUTS {}\n\n'.format(max_conv_inputs))

hf.write('typedef struct {\n')
for i, name in enumerate(layer_list):
    hf.write('  float {}_state[{}_STATE_SIZE];\n'.format(name, name.upper()))
hf.write('} PLCNetState;\n')

hf.write('\n\n#endif\n')

f.close()
hf.close()
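The script is run on a trained PLC model and produces the plc_data.c/plc_data.h pair that the Makefile.am change above compiles into the library. The invocation and generated header below are a sketch: the model file name is hypothetical, and the struct contents follow from which dump_layer hooks return True (the two GRU layers):

# ./dump_plc.py plc_model.h5 plc_data.c plc_data.h
#
# plc_data.h then defines {NAME}_OUT_SIZE / {NAME}_STATE_SIZE per dumped
# layer and ends with the state struct that lpcnet_plc.c embeds in
# LPCNetPLCState:
#
#   typedef struct {
#     float plc_gru1_state[PLC_GRU1_STATE_SIZE];
#     float plc_gru2_state[PLC_GRU2_STATE_SIZE];
#   } PLCNetState;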