Unverified Commit b3abc616 authored by Jean-Marc Valin

automate header generation too

parent 0bcf788e
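Context for the change: the Keras dump script previously generated only the C source with the weight tables and layer structs, while the matching size #defines and extern declarations were maintained by hand in the C code. With this commit the script writes the companion header too, and the activation constants move to upper case. As a rough sketch, assuming a dense layer named input_dense and a GRU layer named vad_gru (both of size 12, per the old hand-written defines), the generated rnn_data.h would plausibly look like:

/*This file is automatically generated from a Keras model*/

#ifndef RNN_DATA_H
#define RNN_DATA_H

#include "rnn.h"

#define INPUT_DENSE_SIZE 12
extern const DenseLayer input_dense;

#define VAD_GRU_SIZE 12
extern const GRULayer vad_gru;

#endif

The exact layout follows the hf.write format strings in the Python hunks below.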
@@ -35,6 +35,7 @@
 #include "arch.h"
 #include "tansig_table.h"
 #include "rnn.h"
+#include "rnn_data.h"
 #include <stdio.h>
 
 static OPUS_INLINE float tansig_approx(float x)
@@ -91,13 +92,13 @@ void compute_dense(const DenseLayer *layer, float *output, const float *input)
          sum += layer->input_weights[j*stride + i]*input[j];
       output[i] = WEIGHTS_SCALE*sum;
    }
-   if (layer->activation == activation_sigmoid) {
+   if (layer->activation == ACTIVATION_SIGMOID) {
       for (i=0;i<N;i++)
          output[i] = sigmoid_approx(output[i]);
-   } else if (layer->activation == activation_tanh) {
+   } else if (layer->activation == ACTIVATION_TANH) {
       for (i=0;i<N;i++)
          output[i] = tansig_approx(output[i]);
-   } else if (layer->activation == activation_relu) {
+   } else if (layer->activation == ACTIVATION_RELU) {
       for (i=0;i<N;i++)
          output[i] = relu(output[i]);
    } else {
@@ -144,9 +145,9 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
         sum += gru->input_weights[2*N + j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[2*N + j*stride + i]*state[j]*r[j];
-      if (gru->activation == activation_sigmoid) sum = sigmoid_approx(WEIGHTS_SCALE*sum);
-      else if (gru->activation == activation_tanh) sum = tansig_approx(WEIGHTS_SCALE*sum);
-      else if (gru->activation == activation_relu) sum = relu(WEIGHTS_SCALE*sum);
+      if (gru->activation == ACTIVATION_SIGMOID) sum = sigmoid_approx(WEIGHTS_SCALE*sum);
+      else if (gru->activation == ACTIVATION_TANH) sum = tansig_approx(WEIGHTS_SCALE*sum);
+      else if (gru->activation == ACTIVATION_RELU) sum = relu(WEIGHTS_SCALE*sum);
       else *(int*)0=0;
       h[i] = z[i]*state[i] + (1-z[i])*sum;
    }
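Aside on the math here: this loop computes the GRU's candidate state, and the final assignment is the standard update-gate interpolation,

$$h_i = z_i \, \mathrm{state}_i + (1 - z_i) \, \tilde{h}_i$$

where z is the update gate computed earlier in the function and the activated sum plays the role of the candidate state h̃. The *(int*)0=0 store on the fallback branch is a deliberate crash for an unsupported activation value.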
@@ -156,17 +157,6 @@ void compute_gru(const GRULayer *gru, float *state, const float *input)
 
 #if 1
 #define INPUT_SIZE 42
-#define DENSE_SIZE 12
-#define VAD_SIZE 12
-#define NOISE_SIZE 48
-#define DENOISE_SIZE 128
-
-extern const DenseLayer input_dense;
-extern const GRULayer vad_gru;
-extern const GRULayer noise_gru;
-extern const GRULayer denoise_gru;
-extern const DenseLayer denoise_output;
-extern const DenseLayer vad_output;
 
 int main() {
    float vad_state[MAX_NEURONS] = {0};
@@ -175,8 +165,8 @@ int main() {
    float dense_out[MAX_NEURONS];
    float noise_input[MAX_NEURONS*3];
    float denoise_input[MAX_NEURONS*3];
-   float noise_state[MAX_NEURONS] = {0};
-   float denoise_state[MAX_NEURONS] = {0};
+   float noise_state[NOISE_GRU_SIZE] = {0};
+   float denoise_state[DENOISE_GRU_SIZE] = {0};
    float gains[22];
    while (1)
    {
@@ -188,14 +178,14 @@ int main() {
       compute_gru(&vad_gru, vad_state, dense_out);
       compute_dense(&vad_output, vad_out, vad_state);
 #if 1
-      for (i=0;i<DENSE_SIZE;i++) noise_input[i] = dense_out[i];
-      for (i=0;i<VAD_SIZE;i++) noise_input[i+DENSE_SIZE] = vad_state[i];
-      for (i=0;i<INPUT_SIZE;i++) noise_input[i+DENSE_SIZE+VAD_SIZE] = input[i];
+      for (i=0;i<INPUT_DENSE_SIZE;i++) noise_input[i] = dense_out[i];
+      for (i=0;i<VAD_GRU_SIZE;i++) noise_input[i+INPUT_DENSE_SIZE] = vad_state[i];
+      for (i=0;i<INPUT_SIZE;i++) noise_input[i+INPUT_DENSE_SIZE+VAD_GRU_SIZE] = input[i];
       compute_gru(&noise_gru, noise_state, noise_input);
-      for (i=0;i<VAD_SIZE;i++) denoise_input[i] = vad_state[i];
-      for (i=0;i<NOISE_SIZE;i++) denoise_input[i+VAD_SIZE] = noise_state[i];
-      for (i=0;i<INPUT_SIZE;i++) denoise_input[i+VAD_SIZE+NOISE_SIZE] = input[i];
+      for (i=0;i<VAD_GRU_SIZE;i++) denoise_input[i] = vad_state[i];
+      for (i=0;i<NOISE_GRU_SIZE;i++) denoise_input[i+VAD_GRU_SIZE] = noise_state[i];
+      for (i=0;i<INPUT_SIZE;i++) denoise_input[i+VAD_GRU_SIZE+NOISE_GRU_SIZE] = input[i];
       compute_gru(&denoise_gru, denoise_state, denoise_input);
       compute_dense(&denoise_output, gains, denoise_state);
@@ -33,9 +33,9 @@
 #define MAX_NEURONS 128
 
-#define activation_tanh 0
-#define activation_sigmoid 1
-#define activation_relu 2
+#define ACTIVATION_TANH 0
+#define ACTIVATION_SIGMOID 1
+#define ACTIVATION_RELU 2
 
 typedef struct {
   const opus_int16 *bias;
@@ -30,20 +30,24 @@ def printVector(f, vector, name):
     f.write('\n};\n\n')
     return;
 
-def printLayer(f, layer):
+def printLayer(f, hf, layer):
     weights = layer.get_weights()
     printVector(f, weights[0], layer.name + '_weights')
     if len(weights) > 2:
         printVector(f, weights[1], layer.name + '_recurrent_weights')
     printVector(f, weights[-1], layer.name + '_bias')
     name = layer.name
-    activation = re.search('function (.*) at', str(layer.activation)).group(1)
+    activation = re.search('function (.*) at', str(layer.activation)).group(1).upper()
     if len(weights) > 2:
-        f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, activation_{}\n}};\n\n'
+        f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
                 .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]/3, activation))
+        hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]/3))
+        hf.write('extern const GRULayer {};\n\n'.format(name));
     else:
-        f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, activation_{}\n}};\n\n'
+        f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, ACTIVATION_{}\n}};\n\n'
                 .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
+        hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
+        hf.write('extern const DenseLayer {};\n\n'.format(name));
 
 def mean_squared_sqrt_error(y_true, y_pred):
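For reference, the regex pulls the activation name (e.g. "tanh") out of Keras's str(layer.activation), and the new .upper() makes it match the renamed ACTIVATION_* constants. The f.write format string then expands to an initializer like the following in the generated C source (layer name and shape illustrative, a 42-input, 12-output dense layer):

const DenseLayer input_dense = {
   input_dense_bias,
   input_dense_weights,
   42, 12, ACTIVATION_TANH
};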
@@ -55,13 +59,19 @@ model = load_model(sys.argv[1], custom_objects={'msse': mean_squared_sqrt_error,
 
 weights = model.get_weights()
 
 f = open(sys.argv[2], 'w')
+hf = open(sys.argv[3], 'w')
 
 f.write('/*This file is automatically generated from a Keras model*/\n\n')
 f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "rnn.h"\n\n')
+hf.write('/*This file is automatically generated from a Keras model*/\n\n')
+hf.write('#ifndef RNN_DATA_H\n#define RNN_DATA_H\n\n#include "rnn.h"\n\n')
 
 for i, layer in enumerate(model.layers):
     if len(layer.get_weights()) > 0:
-        printLayer(f, layer)
+        printLayer(f, hf, layer)
 
+hf.write('\n\n#endif\n')
 f.close()
+hf.close()
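Usage note: the script now takes a third argument for the header path, so the invocation becomes something like python dump_rnn.py weights.hdf5 rnn_data.c rnn_data.h (script and file names illustrative). The #ifndef/#define/#endif pair gives the generated header a standard include guard, and the C code can drop its hand-written declarations in favor of #include "rnn_data.h", as in the first hunk above.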