Unverified Commit 6a9831a6 authored by Jean-Marc Valin

Remove run-time code for old TF2 models

No longer needed now that PLC is trained with the PyTorch stack
parent 1ddfcfd4
Pipeline #5064 passed
@@ -115,78 +115,6 @@ void compute_glu(const LinearLayer *layer, float *output, const float *input, in
   }
}
void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch)
{
LinearLayer matrix;
celt_assert(input != output);
matrix.bias = layer->bias;
matrix.subias = NULL;
matrix.float_weights = layer->input_weights;
matrix.weights = NULL;
matrix.weights_idx = NULL;
matrix.diag = NULL;
matrix.nb_inputs = layer->nb_inputs;
matrix.nb_outputs = layer->nb_neurons;
matrix.scale = NULL;
compute_linear(&matrix, output, input, arch);
compute_activation(output, output, layer->nb_neurons, layer->activation, arch);
}
#ifdef USE_SU_BIAS
#define bias_type subias
#else
#define bias_type bias
#endif
#define MAX_IDX_SIZE 8192
void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch)
{
LinearLayer in_matrix, rec_matrix;
int i, M, N;
float bias[3*MAX_RNN_NEURONS_ALL];
float scale[3*MAX_RNN_NEURONS_ALL];
M = gru->nb_inputs;
N = gru->nb_neurons;
in_matrix.bias = bias;
in_matrix.diag = NULL;
in_matrix.nb_inputs = M;
in_matrix.nb_outputs = 3*N;
in_matrix.subias = bias;
#ifdef DISABLE_DOT_PROD
for (i=0;i<3*N;i++) bias[i] = gru->bias[i] + gru_b_condition[i];
in_matrix.scale = NULL;
in_matrix.float_weights = gru->input_weights;
in_matrix.weights = NULL;
#else
for (i=0;i<3*N;i++) bias[i] = gru->bias_type[i] + gru_b_condition[i];
for (i=0;i<3*N;i++) scale[i] = SCALE_1;
in_matrix.scale = scale;
in_matrix.weights = gru->input_weights;
in_matrix.float_weights = NULL;
#endif
in_matrix.weights_idx = gru->input_weights_idx;
rec_matrix.bias = &gru->bias[3*N];
rec_matrix.diag = NULL;
rec_matrix.nb_inputs = N;
rec_matrix.nb_outputs = 3*N;
rec_matrix.scale = scale;
rec_matrix.subias = &gru->subias[3*N];
#ifdef DISABLE_DOT_PROD
rec_matrix.scale = NULL;
rec_matrix.float_weights = gru->recurrent_weights;
rec_matrix.weights = NULL;
#else
rec_matrix.scale = scale;
rec_matrix.weights = gru->recurrent_weights;
rec_matrix.float_weights = NULL;
#endif
rec_matrix.weights_idx = NULL;
compute_generic_gru(&in_matrix, &rec_matrix, state, input, arch);
}
#define MAX_CONV_INPUTS_ALL DRED_MAX_CONV_INPUTS
void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation, int arch)
......
@@ -31,13 +31,6 @@
#include <stddef.h>
#include "opus_types.h"
#ifdef DISABLE_DOT_PROD
typedef float qweight;
#else
typedef signed char qweight;
#define DOT_PROD
#endif
#define ACTIVATION_LINEAR 0
#define ACTIVATION_SIGMOID 1
#define ACTIVATION_TANH 2
@@ -91,40 +84,6 @@ typedef struct {
int kheight;
} Conv2dLayer;
typedef struct {
const float *bias;
const float *input_weights;
int nb_inputs;
int nb_neurons;
int activation;
} DenseLayer;
typedef struct {
const float *bias;
const float *subias;
const qweight *input_weights;
const int *input_weights_idx;
const qweight *recurrent_weights;
int nb_inputs;
int nb_neurons;
int activation;
int reset_after;
} GRULayer;
typedef struct {
const float *bias;
const float *input_weights;
int nb_inputs;
int kernel_size;
int nb_neurons;
int activation;
} Conv1DLayer;
typedef struct {
const float *embedding_weights;
int nb_inputs;
int dim;
} EmbeddingLayer;
void compute_generic_dense(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
void compute_generic_gru(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch);
@@ -134,10 +93,6 @@ void compute_glu(const LinearLayer *layer, float *output, const float *input, in
void compute_gated_activation(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch);
void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch);
int parse_weights(WeightArray **list, const unsigned char *data, int len);
@@ -169,24 +124,6 @@ int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
int ktime,
int kheight);
int dense_init(DenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int nb_neurons,
int activation);
int gru_init(GRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *input_weights,
const char *input_weights_idx,
const char *recurrent_weights,
int nb_inputs,
int nb_neurons,
int activation,
int reset_after);
void compute_linear_c(const LinearLayer *linear, float *out, const float *in);
void compute_activation_c(float *output, const float *input, int N, int activation);
......
@@ -176,46 +176,6 @@ int linear_init(LinearLayer *layer, const WeightArray *arrays,
return 0;
}
int dense_init(DenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
int nb_inputs,
int nb_neurons,
int activation)
{
if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
return 0;
}
int gru_init(GRULayer *layer, const WeightArray *arrays,
const char *bias,
const char *subias,
const char *input_weights,
const char *input_weights_idx,
const char *recurrent_weights,
int nb_inputs,
int nb_neurons,
int activation,
int reset_after)
{
int total_blocks;
if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
if ((layer->input_weights_idx = find_idx_check(arrays, input_weights_idx, nb_inputs, 3*nb_neurons, &total_blocks)) == NULL) return 1;
if ((layer->input_weights = find_array_check(arrays, input_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->input_weights[0]))) == NULL) return 1;
if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, 3*nb_neurons*nb_neurons*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
layer->reset_after = reset_after;
return 0;
}
int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
const char *bias,
const char *float_weights,
......
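The removed _lpcnet_compute_dense() wrapper only repackaged a DenseLayer into a LinearLayer before calling the generic kernels, so callers can go through the retained compute_generic_dense() directly. The sketch below illustrates that path; the layer sizes, weight arrays, activation choice, and the run_dense_example() helper are hypothetical placeholders for illustration, not code from this commit.

#include <stddef.h>
#include "nnet.h"  /* assumed header providing LinearLayer, compute_generic_dense() */

/* Hypothetical float weights and bias for a 4-input, 2-output dense layer. */
static const float example_weights[4*2] = {0};
static const float example_bias[2] = {0};

static void run_dense_example(float *out, const float *in, int arch)
{
   LinearLayer dense;
   /* Same field setup the removed _lpcnet_compute_dense() wrapper performed internally. */
   dense.bias = example_bias;
   dense.subias = NULL;
   dense.float_weights = example_weights;
   dense.weights = NULL;
   dense.weights_idx = NULL;
   dense.diag = NULL;
   dense.scale = NULL;
   dense.nb_inputs = 4;
   dense.nb_outputs = 2;
   /* Generic dense layer = linear transform followed by the requested activation. */
   compute_generic_dense(&dense, out, in, ACTIVATION_TANH, arch);
}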