Commit fe7b54c0 authored by Jean-Marc Valin

Combine LAR+L1 regularization

parent 054d984b
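In short: for the end-to-end (flag_e2e) model, the data loader gains an lpc_out mode that also emits ground-truth reflection coefficients (derived from the LPC stored in the last 16 feature channels via a new lpc2rc routine), the model exposes the frame-rate features as a second output next to the now-named 'pdf' distribution, and training combines the interpolated μ-law loss (weight 1.0) with a new log-area-ratio matching loss (weight 2.0).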
 import numpy as np
 from tensorflow.keras.utils import Sequence
 
+def lpc2rc(lpc):
+    #print("shape is = ", lpc.shape)
+    order = lpc.shape[-1]
+    rc = 0*lpc
+    for i in range(order, 0, -1):
+        rc[:,:,i-1] = lpc[:,:,-1]
+        ki = rc[:,:,i-1:i].repeat(i-1, axis=2)
+        lpc = (lpc[:,:,:-1] - ki*lpc[:,:,-2::-1])/(1-ki*ki)
+    return rc
+
 class LPCNetLoader(Sequence):
-    def __init__(self, data, features, periods, batch_size):
+    def __init__(self, data, features, periods, batch_size, lpc_out=False):
         self.batch_size = batch_size
         self.nb_batches = np.minimum(np.minimum(data.shape[0], features.shape[0]), periods.shape[0])//self.batch_size
         self.data = data[:self.nb_batches*self.batch_size, :]
         self.features = features[:self.nb_batches*self.batch_size, :]
         self.periods = periods[:self.nb_batches*self.batch_size, :]
+        self.lpc_out = lpc_out
         self.on_epoch_end()
 
     def on_epoch_end(self):
@@ -18,9 +29,13 @@ class LPCNetLoader(Sequence):
         data = self.data[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
         in_data = data[: , :, :3]
         out_data = data[: , :, 3:4]
-        features = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
+        features = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :-16]
         periods = self.periods[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
-        return ([in_data, features, periods], out_data)
+        outputs = [out_data]
+        if self.lpc_out:
+            lpc = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], 2:-2, -16:]
+            outputs.append(lpc2rc(lpc))
+        return ([in_data, features, periods], outputs)
 
     def __len__(self):
         return self.nb_batches
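The new lpc2rc converts direct-form LPC to reflection coefficients with the Levinson step-down recursion: at each order i the last coefficient is the reflection coefficient k_i, and the filter is then deflated by one order via a'_j = (a_j - k_i * a_{i-j}) / (1 - k_i^2). One way to sanity-check it is a round trip through the inverse step-up recursion; the rc2lpc helper below is not part of the commit, just a minimal test sketch:

import numpy as np

def rc2lpc(rc):
    # Hypothetical inverse of lpc2rc: Levinson step-up recursion from
    # reflection coefficients back to direct-form LPC, using the same
    # (batch, time, order) layout as the loader.
    order = rc.shape[-1]
    lpc = rc[:, :, :1].copy()                  # order-1 predictor: a1 = k1
    for i in range(1, order):
        ki = rc[:, :, i:i+1]
        # step up one order: a'_j = a_j + k_i * a_{i-j}, then append a'_i = k_i
        lpc = np.concatenate([lpc + ki*lpc[:, :, ::-1], ki], axis=-1)
    return lpc

# Round-trip check: step-up then step-down should recover the same RCs.
rc = np.random.uniform(-0.9, 0.9, (4, 15, 16))
assert np.allclose(lpc2rc(rc2lpc(rc)), rc)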
@@ -83,3 +83,13 @@ def metric_exc_sd(y_true,y_pred):
     e_gt = tf_l2u(tf_u2l(y_true) - tf_u2l(p))
     sd_egt = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)(e_gt,128)
     return sd_egt
+
+def loss_matchlar():
+    def loss(y_true,y_pred):
+        model_rc = y_pred[:,:,:16]
+        #y_true = lpc2rc(y_true)
+        loss_lar_diff = K.log((1.01 + model_rc)/(1.01 - model_rc)) - K.log((1.01 + y_true)/(1.01 - y_true))
+        loss_lar_diff = tf.square(loss_lar_diff)
+        return tf.reduce_mean(loss_lar_diff, axis=-1)
+    return loss
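loss_matchlar compares the two filters in the log-area-ratio domain: LAR(k) = log((1+k)/(1-k)) maps a reflection coefficient in (-1, 1) onto the whole real line, so a given error costs far more near |k| = 1, where the synthesis filter approaches instability, than near 0. Using 1.01 in place of 1 keeps the logarithm finite even at |k| = 1. y_pred[:,:,:16] reads the 16 reflection coefficients from the model's second output, and the commented-out lpc2rc call shows that the target's LPC-to-RC conversion was moved into the data loader, so y_true already arrives as reflection coefficients. A NumPy sketch of the same computation (helper names are mine):

import numpy as np

def lar(k, guard=1.01):
    # log-area ratio, with the same guard constant as loss_matchlar
    return np.log((guard + k)/(guard - k))

def matchlar_np(rc_true, rc_pred):
    # mean squared LAR difference across the 16 coefficients
    return np.mean((lar(rc_pred) - lar(rc_true))**2, axis=-1)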
@@ -304,8 +304,8 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features=20, batch_s
     if not flag_e2e:
         model = Model([pcm, feat, pitch], ulaw_prob)
     else:
-        m_out = Concatenate()([tensor_preds,ulaw_prob])
-        model = Model([pcm, feat, pitch], m_out)
+        m_out = Concatenate(name='pdf')([tensor_preds,ulaw_prob])
+        model = Model([pcm, feat, pitch], [m_out, cfeat])
     model.rnn_units1 = rnn_units1
     model.rnn_units2 = rnn_units2
     model.nb_used_features = nb_used_features
@@ -125,7 +125,7 @@ with strategy.scope():
     if not flag_e2e:
         model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics='sparse_categorical_crossentropy')
     else:
-        model.compile(optimizer=opt, loss = interp_mulaw(gamma=gamma),metrics=[metric_cel,metric_icel,metric_exc_sd,metric_oginterploss])
+        model.compile(optimizer=opt, loss = [interp_mulaw(gamma=gamma), loss_matchlar()], loss_weights = [1.0, 2.0], metrics={'pdf':[metric_cel,metric_icel,metric_exc_sd,metric_oginterploss]})
     model.summary()
 
 feature_file = args.features
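With two model outputs, compile now takes a list of losses that pairs with the outputs positionally: interp_mulaw trains the 'pdf' output and loss_matchlar trains the feature output, combined as 1.0 * interp_mulaw + 2.0 * loss_matchlar per loss_weights. The metrics argument becomes a dict keyed by output-layer name, which is why the Concatenate layer above was given name='pdf'. A toy model showing the same pattern (names and shapes are placeholders, not the LPCNet architecture):

from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

x = Input((8,))
pdf = Dense(4, name='pdf')(x)        # stands in for the Concatenate output
cfeat = Dense(16, name='cfeat')(x)   # stands in for the frame-rate features
m = Model(x, [pdf, cfeat])
m.compile(optimizer='adam',
          loss=['mse', 'mae'],       # pairs positionally: pdf -> mse, cfeat -> mae
          loss_weights=[1.0, 2.0],   # total = 1.0*mse + 2.0*mae
          metrics={'pdf': ['mae']})  # metrics computed only on the 'pdf' output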
@@ -157,7 +157,7 @@ data = np.reshape(data, (nb_frames, pcm_chunk_size, 4))
 sizeof = features.strides[-1]
 features = np.lib.stride_tricks.as_strided(features, shape=(nb_frames, feature_chunk_size+4, nb_features),
                                            strides=(feature_chunk_size*nb_features*sizeof, nb_features*sizeof, sizeof))
-features = features[:, :, :nb_used_features]
+#features = features[:, :, :nb_used_features]
 
 periods = (.1 + 50*features[:,:,18:19]+100).astype('int16')
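The nb_used_features slice is commented out because the trailing 16 feature channels now carry the ground-truth LPC and must survive until LPCNetLoader, which does the splitting itself: features[:,:,:-16] becomes the conditioning input and features[:,:,-16:] becomes the RC target (the 2:-2 frame trim in the loader appears to drop the two extra frames on each side that the feature_chunk_size+4 strided window adds). Schematically, assuming this tail-of-16 layout:

cond_features = features[:, :, :-16]   # conditioning features fed to the network
lpc_targets   = features[:, :, -16:]   # ground-truth LPC, converted to RCs by lpc2rc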
@@ -185,5 +185,5 @@ else:
     model.save_weights('{}_{}_initial.h5'.format(args.output, args.grua_size))
 
 csv_logger = CSVLogger('training_vals.log')
-loader = LPCNetLoader(data, features, periods, batch_size)
+loader = LPCNetLoader(data, features, periods, batch_size, lpc_out=flag_e2e)
 model.fit(loader, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, sparsify, grub_sparsify, csv_logger])
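With lpc_out=flag_e2e, each batch now carries a list of two targets matching the model's two outputs (a single-element list when the flag is off). Roughly, for one batch (shapes depend on batch_size and the chunk sizes):

inputs, targets = loader[0]
in_data, features, periods = inputs
out_data = targets[0]   # mu-law sample targets for interp_mulaw ('pdf' output)
rc = targets[1]         # reflection coefficients for loss_matchlar (e2e mode only)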