Commit 4d1e630a by Jean-Marc Valin

### constraining the weights to +/- 0.5 so they can be quantized with 8 bits

parent 54eeea72
```diff
@@ -29,7 +29,7 @@
 #include "opus_types.h"
 
-#define WEIGHTS_SCALE (1.f/8192)
+#define WEIGHTS_SCALE (1.f/256)
 
 #define MAX_NEURONS 128
 
@@ -37,18 +37,20 @@
 #define ACTIVATION_SIGMOID 1
 #define ACTIVATION_RELU 2
 
+typedef signed char rnn_weight;
+
 typedef struct {
-  const opus_int16 *bias;
-  const opus_int16 *input_weights;
+  const rnn_weight *bias;
+  const rnn_weight *input_weights;
   int nb_inputs;
   int nb_neurons;
   int activation;
 } DenseLayer;
 
 typedef struct {
-  const opus_int16 *bias;
-  const opus_int16 *input_weights;
-  const opus_int16 *recurrent_weights;
+  const rnn_weight *bias;
+  const rnn_weight *input_weights;
+  const rnn_weight *recurrent_weights;
   int nb_inputs;
   int nb_neurons;
   int activation;
```
(The diff for one additional file in this commit is collapsed and not shown.)
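The header change above is what makes the commit title concrete: with `WEIGHTS_SCALE` at 1/256, a float weight `w` is stored as `round(256*w)`, and that value only fits the new `signed char` type when `|w|` stays below 0.5 (0.5 × 256 = 128 would overflow the int8 maximum of 127, which is why training clips at 0.499 and the dump script clamps at 127, as seen in the diffs below). Here is a minimal Python sketch of the round trip, using a symmetric clamp for illustration; it is not the project's actual C inference code:

```python
import numpy as np

WEIGHTS_SCALE = 1.0 / 256  # the new value from the header diff above

def quantize(w):
    """Map float weights (assumed pre-clipped to +/-0.499) to 8-bit integers."""
    q = np.round(w * 256)
    # The clamp guards the rounding edge where 0.499 * 256 = 127.744 rounds up to 128.
    return np.clip(q, -128, 127).astype(np.int8)

def dequantize(q):
    """Recover approximate float weights, as C code multiplying by WEIGHTS_SCALE would."""
    return q.astype(np.float32) * WEIGHTS_SCALE

w = np.array([-0.499, -0.123, 0.0, 0.25, 0.499], dtype=np.float32)
q = quantize(w)
print(q)              # [-128  -31    0   64  127]
print(dequantize(q))  # roughly [-0.5, -0.121, 0., 0.25, 0.496], each within one step (1/256)
```

Compared with the previous 1/8192 scale, which needed 16-bit storage, this halves the size of the weight tables at the cost of coarser resolution, which the constrained training below is meant to absorb.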
```diff
@@ -15,9 +15,9 @@ import numpy as np
 
 def printVector(f, vector, name):
     v = np.reshape(vector, (-1));
     #print('static const float ', name, '[', len(v), '] = \n', file=f)
-    f.write('static const opus_int16 {}[{}] = {{\n   '.format(name, len(v)))
+    f.write('static const rnn_weight {}[{}] = {{\n   '.format(name, len(v)))
     for i in range(0, len(v)):
-        f.write('{}'.format(int(round(8192*v[i]))))
+        f.write('{}'.format(min(127, int(round(256*v[i])))))
         if (i!=len(v)-1):
             f.write(',')
         else:
@@ -50,11 +50,14 @@ def printLayer(f, hf, layer):
 
     hf.write('extern const DenseLayer {};\n\n'.format(name));
 
+def foo(c, name):
+    return 1
+
 def mean_squared_sqrt_error(y_true, y_pred):
     return K.mean(K.square(K.sqrt(y_pred) - K.sqrt(y_true)), axis=-1)
 
-model = load_model(sys.argv[1], custom_objects={'msse': mean_squared_sqrt_error, 'mean_squared_sqrt_error': mean_squared_sqrt_error, 'my_crossentropy': mean_squared_sqrt_error, 'mycost': mean_squared_sqrt_error})
+model = load_model(sys.argv[1], custom_objects={'msse': mean_squared_sqrt_error, 'mean_squared_sqrt_error': mean_squared_sqrt_error, 'my_crossentropy': mean_squared_sqrt_error, 'mycost': mean_squared_sqrt_error, 'WeightClip': foo})
 
 weights = model.get_weights()
```
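Two things happen in the dump script: `printVector` now emits `rnn_weight` (8-bit) tables using the 256 scale with a clamp at 127, and `load_model` gains a `'WeightClip': foo` entry, a dummy stand-in whose return value is never used but which lets Keras deserialize a model that was saved with the custom constraint attached. A self-contained sketch of a dump helper in the same spirit follows; the helper name and the symmetric clamp at -128 are illustrative choices of this example, not what the script itself does:

```python
import io
import numpy as np

def dump_c_array(f, vector, name):
    """Write a weight tensor as a static C array of 8-bit entries
    (illustrative helper modeled on the modified printVector)."""
    v = np.reshape(vector, (-1))
    # Scale by 256 and clamp both ends so every entry fits a signed char.
    q = [max(-128, min(127, int(round(256 * x)))) for x in v]
    f.write('static const rnn_weight {}[{}] = {{\n   '.format(name, len(v)))
    f.write(', '.join(str(x) for x in q))
    f.write('\n};\n\n')

buf = io.StringIO()
dump_c_array(buf, np.array([-0.499, 0.0, 0.25], dtype=np.float32), 'demo_bias')
print(buf.getvalue())
# static const rnn_weight demo_bias[3] = {
#    -128, 0, 64
# };
```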
```diff
@@ -14,8 +14,10 @@ from keras.layers import Dropout
 from keras.layers import concatenate
 from keras import losses
 from keras import regularizers
 from keras.constraints import min_max_norm
 import h5py
+from keras.constraints import Constraint
+from keras import backend as K
 
 import numpy as np
@@ -41,20 +43,34 @@ def mycost(y_true, y_pred):
 def my_accuracy(y_true, y_pred):
     return K.mean(2*K.abs(y_true-0.5) * K.equal(y_true, K.round(y_pred)), axis=-1)
 
+class WeightClip(Constraint):
+    '''Clips the weights incident to each hidden unit to be inside a range
+    '''
+    def __init__(self, c=2):
+        self.c = c
+
+    def __call__(self, p):
+        return K.clip(p, -self.c, self.c)
+
+    def get_config(self):
+        return {'name': self.__class__.__name__,
+                'c': self.c}
+
 reg = 0.000001
+constraint = WeightClip(0.499)
 
 print('Build model...')
 main_input = Input(shape=(None, 42), name='main_input')
-tmp = Dense(24, activation='tanh', name='input_dense')(main_input)
-vad_gru = GRU(24, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, name='vad_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg))(tmp)
-vad_output = Dense(1, activation='sigmoid', name='vad_output')(vad_gru)
+tmp = Dense(24, activation='tanh', name='input_dense', kernel_constraint=constraint, bias_constraint=constraint)(main_input)
+vad_gru = GRU(24, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, name='vad_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg), kernel_constraint=constraint, recurrent_constraint=constraint, bias_constraint=constraint)(tmp)
+vad_output = Dense(1, activation='sigmoid', name='vad_output', kernel_constraint=constraint, bias_constraint=constraint)(vad_gru)
 
 noise_input = keras.layers.concatenate([tmp, vad_gru, main_input])
-noise_gru = GRU(48, activation='relu', recurrent_activation='sigmoid', return_sequences=True, name='noise_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg))(noise_input)
+noise_gru = GRU(48, activation='relu', recurrent_activation='sigmoid', return_sequences=True, name='noise_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg), kernel_constraint=constraint, recurrent_constraint=constraint, bias_constraint=constraint)(noise_input)
 
 denoise_input = keras.layers.concatenate([vad_gru, noise_gru, main_input])
-denoise_gru = GRU(96, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, name='denoise_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg))(denoise_input)
+denoise_gru = GRU(96, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, name='denoise_gru', kernel_regularizer=regularizers.l2(reg), recurrent_regularizer=regularizers.l2(reg), kernel_constraint=constraint, recurrent_constraint=constraint, bias_constraint=constraint)(denoise_input)
 
-denoise_output = Dense(22, activation='sigmoid', name='denoise_output')(denoise_gru)
+denoise_output = Dense(22, activation='sigmoid', name='denoise_output', kernel_constraint=constraint, bias_constraint=constraint)(denoise_gru)
 
 model = Model(inputs=main_input, outputs=[denoise_output, vad_output])
@@ -95,6 +111,6 @@ print(len(x_train), 'train sequences. x shape =', x_train.shape, 'y shape = ', y
 print('Train...')
 model.fit(x_train, [y_train, vad_train],
           batch_size=batch_size,
-          epochs=60,
+          epochs=120,
           validation_split=0.1)
-model.save("newweights6a2a.hdf5")
+model.save("newweights6c.hdf5")
```
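On the training side, the new `WeightClip(0.499)` constraint is attached to every kernel, recurrent, and bias tensor, so Keras re-clips the parameters after each optimizer update and the trained model never leaves the range the 8-bit dump can represent. Below is a small sketch, separate from the training script, of reloading such a model and checking that property; the `compile=False` shortcut (so the custom losses need not be supplied) and the variable names are assumptions of this example:

```python
import numpy as np
from keras.models import load_model
from keras.constraints import Constraint
from keras import backend as K

class WeightClip(Constraint):
    """Same clipping constraint as in the training script; Keras needs it to deserialize."""
    def __init__(self, c=2):
        self.c = c
    def __call__(self, p):
        return K.clip(p, -self.c, self.c)
    def get_config(self):
        return {'name': self.__class__.__name__, 'c': self.c}

# "newweights6c.hdf5" is the file the training script saves at the end.
model = load_model("newweights6c.hdf5",
                   custom_objects={'WeightClip': WeightClip},
                   compile=False)  # skip restoring optimizer/losses; only the weights matter here

largest = max(float(np.abs(w).max()) for w in model.get_weights())
print('largest |weight| =', largest)  # expected to stay at or below 0.499
```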