Unverified Commit 0bcf788e authored by Jean-Marc Valin

RNN C code

parent cf473ce2
/* Copyright (c) 2008-2011 Octasic Inc.
2012-2017 Jean-Marc Valin */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <math.h>
#include "opus_types.h"
#include "common.h"
#include "arch.h"
#include "tansig_table.h"
#include "rnn.h"
#include <stdio.h>
static OPUS_INLINE float tansig_approx(float x)
{
   int i;
   float y, dy;
   float sign=1;
   /* Tests are reversed to catch NaNs */
   if (!(x<8))
      return 1;
   if (!(x>-8))
      return -1;
#ifndef FIXED_POINT
   /* Another check in case of -ffast-math */
   if (celt_isnan(x))
      return 0;
#endif
   if (x<0)
   {
      x=-x;
      sign=-1;
   }
   i = (int)floor(.5f+25*x);
   x -= .04f*i;
   y = tansig_table[i];
   dy = 1-y*y;
   y = y + x*dy*(1 - y*x);
   return sign*y;
}
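/* Note on the approximation above: tansig_table[] samples tanh() every
   0.04 (= 1/25) over [0, 8]. Given the nearest table entry y and the
   residual offset x, the update y + x*dy*(1 - y*x) with dy = 1 - y*y is
   the second-order Taylor expansion of tanh around the sample point,
   using tanh' = 1 - tanh^2 and tanh'' = -2*tanh*(1 - tanh^2). */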
static OPUS_INLINE float sigmoid_approx(float x)
{
   return .5 + .5*tansig_approx(.5*x);
}
static OPUS_INLINE float relu(float x)
{
   return x < 0 ? 0 : x;
}
void compute_dense(const DenseLayer *layer, float *output, const float *input)
{
   int i, j;
   int N, M;
   int stride;
   M = layer->nb_inputs;
   N = layer->nb_neurons;
   stride = N;
   for (i=0;i<N;i++)
   {
      /* Compute the linear response of neuron i. */
      float sum = layer->bias[i];
      for (j=0;j<M;j++)
         sum += layer->input_weights[j*stride + i]*input[j];
      output[i] = WEIGHTS_SCALE*sum;
   }
   if (layer->activation == activation_sigmoid) {
      for (i=0;i<N;i++)
         output[i] = sigmoid_approx(output[i]);
   } else if (layer->activation == activation_tanh) {
      for (i=0;i<N;i++)
         output[i] = tansig_approx(output[i]);
   } else if (layer->activation == activation_relu) {
      for (i=0;i<N;i++)
         output[i] = relu(output[i]);
   } else {
      /* Unknown activation: crash deliberately (null dereference). */
      *(int*)0=0;
   }
}
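/* Layout note: input_weights is indexed as input_weights[j*stride + i]
   with stride = nb_neurons, i.e. the weight from input j to neuron i,
   matching the row-major dump of the Keras (nb_inputs, nb_neurons)
   kernel. A minimal usage sketch, with made-up weights for illustration
   (8192 corresponds to 1.0 and -4096 to -0.5 after WEIGHTS_SCALE):

      static const opus_int16 toy_bias[1] = {0};
      static const opus_int16 toy_weights[2] = {8192, -4096};
      static const DenseLayer toy = {toy_bias, toy_weights, 2, 1, activation_tanh};
      float in[2] = {1.f, 1.f};
      float out[1];
      compute_dense(&toy, out, in);   (out[0] is approximately tanh(0.5))
*/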
void compute_gru(const GRULayer *gru, float *state, const float *input)
{
   int i, j;
   int N, M;
   int stride;
   float z[MAX_NEURONS];
   float r[MAX_NEURONS];
   float h[MAX_NEURONS];
   M = gru->nb_inputs;
   N = gru->nb_neurons;
   stride = 3*N;
   for (i=0;i<N;i++)
   {
      /* Compute update gate. */
      float sum = gru->bias[i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[j*stride + i]*state[j];
      z[i] = sigmoid_approx(WEIGHTS_SCALE*sum);
   }
   for (i=0;i<N;i++)
   {
      /* Compute reset gate. */
      float sum = gru->bias[N + i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[N + j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[N + j*stride + i]*state[j];
      r[i] = sigmoid_approx(WEIGHTS_SCALE*sum);
   }
   for (i=0;i<N;i++)
   {
      /* Compute output. */
      float sum = gru->bias[2*N + i];
      for (j=0;j<M;j++)
         sum += gru->input_weights[2*N + j*stride + i]*input[j];
      for (j=0;j<N;j++)
         sum += gru->recurrent_weights[2*N + j*stride + i]*state[j]*r[j];
      if (gru->activation == activation_sigmoid) sum = sigmoid_approx(WEIGHTS_SCALE*sum);
      else if (gru->activation == activation_tanh) sum = tansig_approx(WEIGHTS_SCALE*sum);
      else if (gru->activation == activation_relu) sum = relu(WEIGHTS_SCALE*sum);
      else *(int*)0=0;
      h[i] = z[i]*state[i] + (1-z[i])*sum;
   }
   for (i=0;i<N;i++)
      state[i] = h[i];
}
#if 1
#define INPUT_SIZE 42
#define DENSE_SIZE 12
#define VAD_SIZE 12
#define NOISE_SIZE 48
#define DENOISE_SIZE 128
extern const DenseLayer input_dense;
extern const GRULayer vad_gru;
extern const GRULayer noise_gru;
extern const GRULayer denoise_gru;
extern const DenseLayer denoise_output;
extern const DenseLayer vad_output;
int main() {
   float vad_state[MAX_NEURONS] = {0};
   float vad_out[MAX_NEURONS] = {0};
   float input[INPUT_SIZE];
   float dense_out[MAX_NEURONS];
   float noise_input[MAX_NEURONS*3];
   float denoise_input[MAX_NEURONS*3];
   float noise_state[MAX_NEURONS] = {0};
   float denoise_state[MAX_NEURONS] = {0};
   float gains[22];
   while (1)
   {
      int i;
      for (i=0;i<INPUT_SIZE;i++) scanf("%f", &input[i]);
      /* Read and discard the 45 values that follow the features in each
         input frame (each read overwrites the same unused slot). */
      for (i=0;i<45;i++) scanf("%f", &vad_out[0]);
      if (feof(stdin)) break;
      compute_dense(&input_dense, dense_out, input);
      compute_gru(&vad_gru, vad_state, dense_out);
      compute_dense(&vad_output, vad_out, vad_state);
#if 1
      for (i=0;i<DENSE_SIZE;i++) noise_input[i] = dense_out[i];
      for (i=0;i<VAD_SIZE;i++) noise_input[i+DENSE_SIZE] = vad_state[i];
      for (i=0;i<INPUT_SIZE;i++) noise_input[i+DENSE_SIZE+VAD_SIZE] = input[i];
      compute_gru(&noise_gru, noise_state, noise_input);
      for (i=0;i<VAD_SIZE;i++) denoise_input[i] = vad_state[i];
      for (i=0;i<NOISE_SIZE;i++) denoise_input[i+VAD_SIZE] = noise_state[i];
      for (i=0;i<INPUT_SIZE;i++) denoise_input[i+VAD_SIZE+NOISE_SIZE] = input[i];
      compute_gru(&denoise_gru, denoise_state, denoise_input);
      compute_dense(&denoise_output, gains, denoise_state);
      for (i=0;i<22;i++) printf("%f ", gains[i]);
#endif
      printf("%f\n", vad_out[0]);
   }
   return 0;
}
#endif
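/* Build sketch for the test harness above (file names are illustrative;
   rnn_data.c stands for the weights file generated by dump_rnn.py):

      cc -O2 rnn.c rnn_data.c -lm -o rnn_test

   The program reads whitespace-separated floats from stdin, one frame of
   42 features (plus 45 discarded values) per iteration, and prints the
   22 gains followed by the VAD output for each frame. */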
/* Copyright (c) 2017 Jean-Marc Valin */
/*
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RNN_H_
#define RNN_H_
#include "opus_types.h"
#define WEIGHTS_SCALE (1.f/8192)
#define MAX_NEURONS 128
#define activation_tanh 0
#define activation_sigmoid 1
#define activation_relu 2
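/* WEIGHTS_SCALE undoes the fixed-point quantization applied by
   dump_rnn.py, which stores each weight as int(round(8192*w)).
   MAX_NEURONS bounds the scratch arrays used for the GRU gates. */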
typedef struct {
   const opus_int16 *bias;
   const opus_int16 *input_weights;
   int nb_inputs;
   int nb_neurons;
   int activation;
} DenseLayer;

typedef struct {
   const opus_int16 *bias;
   const opus_int16 *input_weights;
   const opus_int16 *recurrent_weights;
   int nb_inputs;
   int nb_neurons;
   int activation;
} GRULayer;

void compute_dense(const DenseLayer *layer, float *output, const float *input);

void compute_gru(const GRULayer *gru, float *state, const float *input);

#endif /* RNN_H_ */
/* This file is auto-generated by gen_tables */
static const float tansig_table[201] = {
0.000000f, 0.039979f, 0.079830f, 0.119427f, 0.158649f,
0.197375f, 0.235496f, 0.272905f, 0.309507f, 0.345214f,
0.379949f, 0.413644f, 0.446244f, 0.477700f, 0.507977f,
0.537050f, 0.564900f, 0.591519f, 0.616909f, 0.641077f,
0.664037f, 0.685809f, 0.706419f, 0.725897f, 0.744277f,
0.761594f, 0.777888f, 0.793199f, 0.807569f, 0.821040f,
0.833655f, 0.845456f, 0.856485f, 0.866784f, 0.876393f,
0.885352f, 0.893698f, 0.901468f, 0.908698f, 0.915420f,
0.921669f, 0.927473f, 0.932862f, 0.937863f, 0.942503f,
0.946806f, 0.950795f, 0.954492f, 0.957917f, 0.961090f,
0.964028f, 0.966747f, 0.969265f, 0.971594f, 0.973749f,
0.975743f, 0.977587f, 0.979293f, 0.980869f, 0.982327f,
0.983675f, 0.984921f, 0.986072f, 0.987136f, 0.988119f,
0.989027f, 0.989867f, 0.990642f, 0.991359f, 0.992020f,
0.992631f, 0.993196f, 0.993718f, 0.994199f, 0.994644f,
0.995055f, 0.995434f, 0.995784f, 0.996108f, 0.996407f,
0.996682f, 0.996937f, 0.997172f, 0.997389f, 0.997590f,
0.997775f, 0.997946f, 0.998104f, 0.998249f, 0.998384f,
0.998508f, 0.998623f, 0.998728f, 0.998826f, 0.998916f,
0.999000f, 0.999076f, 0.999147f, 0.999213f, 0.999273f,
0.999329f, 0.999381f, 0.999428f, 0.999472f, 0.999513f,
0.999550f, 0.999585f, 0.999617f, 0.999646f, 0.999673f,
0.999699f, 0.999722f, 0.999743f, 0.999763f, 0.999781f,
0.999798f, 0.999813f, 0.999828f, 0.999841f, 0.999853f,
0.999865f, 0.999875f, 0.999885f, 0.999893f, 0.999902f,
0.999909f, 0.999916f, 0.999923f, 0.999929f, 0.999934f,
0.999939f, 0.999944f, 0.999948f, 0.999952f, 0.999956f,
0.999959f, 0.999962f, 0.999965f, 0.999968f, 0.999970f,
0.999973f, 0.999975f, 0.999977f, 0.999978f, 0.999980f,
0.999982f, 0.999983f, 0.999984f, 0.999986f, 0.999987f,
0.999988f, 0.999989f, 0.999990f, 0.999990f, 0.999991f,
0.999992f, 0.999992f, 0.999993f, 0.999994f, 0.999994f,
0.999994f, 0.999995f, 0.999995f, 0.999996f, 0.999996f,
0.999996f, 0.999997f, 0.999997f, 0.999997f, 0.999997f,
0.999997f, 0.999998f, 0.999998f, 0.999998f, 0.999998f,
0.999998f, 0.999998f, 0.999999f, 0.999999f, 0.999999f,
0.999999f, 0.999999f, 0.999999f, 0.999999f, 0.999999f,
0.999999f, 0.999999f, 0.999999f, 0.999999f, 0.999999f,
1.000000f, 1.000000f, 1.000000f, 1.000000f, 1.000000f,
1.000000f, 1.000000f, 1.000000f, 1.000000f, 1.000000f,
1.000000f,
};
#!/usr/bin/python
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import GRU
from keras.models import load_model
from keras import backend as K
import sys
import re
import numpy as np
def printVector(f, vector, name):
    v = np.reshape(vector, (-1))
    #print('static const float ', name, '[', len(v), '] = \n', file=f)
    f.write('static const opus_int16 {}[{}] = {{\n   '.format(name, len(v)))
    for i in range(0, len(v)):
        f.write('{}'.format(int(round(8192*v[i]))))
        if (i != len(v)-1):
            f.write(',')
        else:
            break
        if (i % 8 == 7):
            f.write("\n   ")
        else:
            f.write(" ")
    #print(v, file=f)
    f.write('\n};\n\n')
    return
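# Note: the 8192 scale factor above matches WEIGHTS_SCALE (1.f/8192) in
# rnn.h, which converts the emitted opus_int16 weights back to floats at
# inference time.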
def printLayer(f, layer):
    weights = layer.get_weights()
    printVector(f, weights[0], layer.name + '_weights')
    if len(weights) > 2:
        printVector(f, weights[1], layer.name + '_recurrent_weights')
    printVector(f, weights[-1], layer.name + '_bias')
    name = layer.name
    activation = re.search('function (.*) at', str(layer.activation)).group(1)
    if len(weights) > 2:
        f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, activation_{}\n}};\n\n'
                .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation))
    else:
        f.write('const DenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}, {}, activation_{}\n}};\n\n'
                .format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
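# Keras stores a dense kernel with shape (nb_inputs, nb_neurons) and a GRU
# kernel with shape (nb_inputs, 3*nb_neurons), the update, reset and
# candidate weights packed side by side, hence shape[1]//3 for the GRU
# neuron count (integer division so the emitted C stays an integer).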
def mean_squared_sqrt_error(y_true, y_pred):
    return K.mean(K.square(K.sqrt(y_pred) - K.sqrt(y_true)), axis=-1)

model = load_model(sys.argv[1], custom_objects={'msse': mean_squared_sqrt_error, 'mean_squared_sqrt_error': mean_squared_sqrt_error, 'my_crossentropy': mean_squared_sqrt_error, 'mycost': mean_squared_sqrt_error})
weights = model.get_weights()

f = open(sys.argv[2], 'w')
f.write('/*This file is automatically generated from a Keras model*/\n\n')
f.write('#ifdef HAVE_CONFIG_H\n#include "config.h"\n#endif\n\n#include "rnn.h"\n\n')

for i, layer in enumerate(model.layers):
    if len(layer.get_weights()) > 0:
        printLayer(f, layer)
f.close()
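# Usage sketch (file names are illustrative):
#   python dump_rnn.py model.hdf5 rnn_data.c
# where model.hdf5 is a trained Keras model saved with model.save() and
# rnn_data.c is the generated C weights file included by the code above.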