diff --git a/dnn/training_tf2/dataloader.py b/dnn/training_tf2/dataloader.py
index b4f1f18645a3eaf2d74b4cd80a4220c95925474f..93c38255947bc9909bef435cc2c907aa052bef67 100644
--- a/dnn/training_tf2/dataloader.py
+++ b/dnn/training_tf2/dataloader.py
@@ -1,13 +1,24 @@
 import numpy as np
 from tensorflow.keras.utils import Sequence
 
+def lpc2rc(lpc):
+    #print("shape is = ", lpc.shape)
+    order = lpc.shape[-1]
+    rc = 0*lpc
+    for i in range(order, 0, -1):
+        rc[:,:,i-1] = lpc[:,:,-1]
+        ki = rc[:,:,i-1:i].repeat(i-1, axis=2)
+        lpc = (lpc[:,:,:-1] - ki*lpc[:,:,-2::-1])/(1-ki*ki)
+    return rc
+
 class LPCNetLoader(Sequence):
-    def __init__(self, data, features, periods, batch_size):
+    def __init__(self, data, features, periods, batch_size, lpc_out=False):
         self.batch_size = batch_size
         self.nb_batches = np.minimum(np.minimum(data.shape[0], features.shape[0]), periods.shape[0])//self.batch_size
         self.data = data[:self.nb_batches*self.batch_size, :]
         self.features = features[:self.nb_batches*self.batch_size, :]
         self.periods = periods[:self.nb_batches*self.batch_size, :]
+        self.lpc_out = lpc_out
         self.on_epoch_end()
 
     def on_epoch_end(self):
@@ -18,9 +29,13 @@ class LPCNetLoader(Sequence):
         data = self.data[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
         in_data = data[: , :, :3]
         out_data = data[: , :, 3:4]
-        features = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
+        features = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :-16]  # drop the 16 LPCs from the conditioning input
         periods = self.periods[self.indices[index*self.batch_size:(index+1)*self.batch_size], :, :]
-        return ([in_data, features, periods], out_data)
+        outputs = [out_data]
+        if self.lpc_out:
+            lpc = self.features[self.indices[index*self.batch_size:(index+1)*self.batch_size], 2:-2, -16:]  # 2-frame trim per side aligns targets with cfeat
+            outputs.append(lpc2rc(lpc))  # train against reflection coefficients rather than raw LPCs
+        return ([in_data, features, periods], outputs)
 
     def __len__(self):
         return self.nb_batches
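A quick sanity check for lpc2rc() is its inverse: the step-up (Levinson) recursion turns reflection coefficients back into LPCs, so the round trip should be the identity. A minimal NumPy sketch, assuming lpc2rc() from the hunk above is in scope; rc2lpc() is a hypothetical helper written only for this check:

    import numpy as np

    def rc2lpc(rc):
        # step-up recursion: grow an order-1 predictor one coefficient at a time
        lpc = rc[:, :, :1].copy()
        for i in range(1, rc.shape[-1]):
            ki = rc[:, :, i:i+1]
            lpc = np.concatenate([lpc + ki*lpc[:, :, ::-1], ki], axis=2)
        return lpc

    rc = np.random.uniform(-0.9, 0.9, size=(4, 15, 16))
    assert np.allclose(lpc2rc(rc2lpc(rc)), rc)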
diff --git a/dnn/training_tf2/lossfuncs.py b/dnn/training_tf2/lossfuncs.py
index 8a627eadbbbe17d46697bd1c4fae6b07908b1055..f858f3548e5b8139fd16f313e95c0f40a99c36b2 100644
--- a/dnn/training_tf2/lossfuncs.py
+++ b/dnn/training_tf2/lossfuncs.py
@@ -83,3 +83,13 @@ def metric_exc_sd(y_true,y_pred):
     e_gt = tf_l2u(tf_u2l(y_true) - tf_u2l(p))
     sd_egt = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)(e_gt,128)
     return sd_egt
+
+def loss_matchlar():
+    def loss(y_true, y_pred):
+        model_rc = y_pred[:,:,:16]  # predicted RCs sit in the first 16 channels of cfeat
+        # y_true already holds RCs (the loader applies lpc2rc); 1.01 keeps the logs finite near |rc| = 1
+        loss_lar_diff = K.log((1.01 + model_rc)/(1.01 - model_rc)) - K.log((1.01 + y_true)/(1.01 - y_true))
+        loss_lar_diff = tf.square(loss_lar_diff)
+        return tf.reduce_mean(loss_lar_diff, axis=-1)
+    return loss
+
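For reference, loss_matchlar() compares the coefficients in the log-area-ratio (LAR) domain, LAR(k) = log((1+k)/(1-k)), which magnifies differences near |k| = 1 where the synthesis filter approaches instability. A plain NumPy transcription of the same computation, with hypothetical inputs, just to make the mapping explicit:

    import numpy as np

    def lar(rc, eps=0.01):
        # eps matches the 1.01 constants above: logs stay finite if a
        # predicted coefficient drifts slightly outside (-1, 1)
        return np.log((1 + eps + rc)/(1 + eps - rc))

    true_rc = np.array([[[0.50, -0.30]]])
    pred_rc = np.array([[[0.45, -0.35]]])
    loss = np.mean(np.square(lar(pred_rc) - lar(true_rc)), axis=-1)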
diff --git a/dnn/training_tf2/lpcnet.py b/dnn/training_tf2/lpcnet.py
index 46e66bcef9534425fa26475548f160ca9c715f70..39735c6e369670d13eb669d4238940e530644909 100644
--- a/dnn/training_tf2/lpcnet.py
+++ b/dnn/training_tf2/lpcnet.py
@@ -304,8 +304,8 @@ def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features=20, batch_s
     if not flag_e2e:
         model = Model([pcm, feat, pitch], ulaw_prob)
     else:
-        m_out = Concatenate()([tensor_preds,ulaw_prob])
-        model = Model([pcm, feat, pitch], m_out)
+        m_out = Concatenate(name='pdf')([tensor_preds,ulaw_prob])  # named so per-output metrics can target it
+        model = Model([pcm, feat, pitch], [m_out, cfeat])  # second output feeds loss_matchlar()
     model.rnn_units1 = rnn_units1
     model.rnn_units2 = rnn_units2
     model.nb_used_features = nb_used_features
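In e2e mode the model now returns two tensors. Keras pairs the loss list in train_lpcnet.py with the outputs positionally, while the metrics dict keyed on 'pdf' attaches only to the named Concatenate output. A hedged sketch of how the second output is consumed (variable names are illustrative):

    # pdf:   concatenated [tensor_preds, ulaw_prob], trained with interp_mulaw()
    # cfeat: conditioning features; the first 16 channels are the predicted RCs
    pdf, cfeat = model([in_data, features, periods])
    model_rc = cfeat[:, :, :16]  # what loss_matchlar() sees as y_pred[:,:,:16]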
diff --git a/dnn/training_tf2/train_lpcnet.py b/dnn/training_tf2/train_lpcnet.py
index a6186bff40aa4ccf7ac5d97d7253cd4f3813a271..1469563f2c3852864749aa3f3f867322d47e1db1 100755
--- a/dnn/training_tf2/train_lpcnet.py
+++ b/dnn/training_tf2/train_lpcnet.py
@@ -125,7 +125,7 @@ with strategy.scope():
     if not flag_e2e:
         model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics='sparse_categorical_crossentropy')
     else:
-        model.compile(optimizer=opt, loss = interp_mulaw(gamma=gamma),metrics=[metric_cel,metric_icel,metric_exc_sd,metric_oginterploss])
+        model.compile(optimizer=opt, loss=[interp_mulaw(gamma=gamma), loss_matchlar()], loss_weights=[1.0, 2.0], metrics={'pdf': [metric_cel,metric_icel,metric_exc_sd,metric_oginterploss]})
     model.summary()
 
 feature_file = args.features
@@ -157,7 +157,7 @@ data = np.reshape(data, (nb_frames, pcm_chunk_size, 4))
 sizeof = features.strides[-1]
 features = np.lib.stride_tricks.as_strided(features, shape=(nb_frames, feature_chunk_size+4, nb_features),
                                            strides=(feature_chunk_size*nb_features*sizeof, nb_features*sizeof, sizeof))
-features = features[:, :, :nb_used_features]
+#features = features[:, :, :nb_used_features] # keep the trailing LPCs; LPCNetLoader strips them per batch
 
 
 periods = (.1 + 50*features[:,:,18:19]+100).astype('int16')
@@ -185,5 +185,5 @@ else:
 
 model.save_weights('{}_{}_initial.h5'.format(args.output, args.grua_size))
 csv_logger = CSVLogger('training_vals.log')
-loader = LPCNetLoader(data, features, periods, batch_size)
+loader = LPCNetLoader(data, features, periods, batch_size, lpc_out=flag_e2e)  # emit RC targets only in e2e mode
 model.fit(loader, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, sparsify, grub_sparsify, csv_logger])
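Taken together, the script now hands LPCNetLoader features that keep their trailing 16 LPC coefficients instead of truncating to nb_used_features up front. A shape walk-through of the two slices the loader takes, under illustrative sizes (nb_features = 36 and the 2-frame pad per side are assumptions, not read from this diff):

    import numpy as np

    batch, feature_chunk_size, nb_features = 4, 15, 36
    features = np.zeros((batch, feature_chunk_size + 4, nb_features))

    cond = features[:, :, :-16]     # (4, 19, 20): conditioning input, LPCs removed
    lpc  = features[:, 2:-2, -16:]  # (4, 15, 16): LPC targets aligned with cfeat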