diff --git a/dnn/training_tf2/train_lpcnet.py b/dnn/training_tf2/train_lpcnet.py
index c1abc8c8b1499097f3fbae909c6a4518a1b5ab2a..0e90a28fa65277e29c334a062d01da41083c46d7 100755
--- a/dnn/training_tf2/train_lpcnet.py
+++ b/dnn/training_tf2/train_lpcnet.py
@@ -37,17 +37,17 @@ import tensorflow.keras.backend as K
 import h5py
 
 import tensorflow as tf
-gpus = tf.config.experimental.list_physical_devices('GPU')
-if gpus:
-  try:
-    tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5120)])
-  except RuntimeError as e:
-    print(e)
+#gpus = tf.config.experimental.list_physical_devices('GPU')
+#if gpus:
+#  try:
+#    tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=5120)])
+#  except RuntimeError as e:
+#    print(e)
 
 nb_epochs = 120
 
 # Try reducing batch_size if you run out of memory on your GPU
-batch_size = 64
+batch_size = 128
 
 model, _, _ = lpcnet.new_lpcnet_model(training=True)
 
@@ -102,15 +102,14 @@ del pred
 del in_exc
 
 # dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet32y_384_10_G16_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('lpcnet33_384_{epoch:02d}.h5')
 
 #Set this to True to adapt an existing model (e.g. on new data)
 adaptation = False
 
-model.load_weights('lpcnet32v_384_10_G16_00.h5')
 if adaptation:
     #Adapting from an existing model
-    model.load_weights('lpcnet32v_384_10_G16_100.h5')
+    model.load_weights('lpcnet32v_384_100.h5')
     sparsify = lpcnet.Sparsify(0, 0, 1, (0.05, 0.05, 0.2))
     lr = 0.0001
     decay = 0
@@ -121,5 +120,5 @@ else:
     decay = 5e-5
 
 model.compile(optimizer=Adam(lr, decay=decay, beta_2=0.99), loss='sparse_categorical_crossentropy')
-model.save_weights('lpcnet32y_384_10_G16_00.h5');
+model.save_weights('lpcnet33_384_00.h5')
 model.fit([in_data, features, periods], out_exc, batch_size=batch_size, epochs=nb_epochs, validation_split=0.0, callbacks=[checkpoint, sparsify])
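
Note on the first hunk: it disables the fixed 5120 MB virtual-device cap outright. If the intent was only to keep TensorFlow from grabbing all GPU memory up front, a common alternative is memory growth, which allocates on demand without a hard limit. A minimal sketch using the same tf.config.experimental API as the removed block; this is an assumption about intent, not part of the patch:

import tensorflow as tf

# Sketch only: grow GPU memory on demand instead of pinning a fixed
# 5120 MB virtual device. Must run before anything initializes the GPUs.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
  try:
    tf.config.experimental.set_memory_growth(gpu, True)
  except RuntimeError as e:
    # Raised if the device was already initialized
    print(e)

Memory growth also pairs naturally with the batch_size = 128 bump: if the larger batches overflow the GPU, the failure surfaces as an allocator error instead of being masked by the old hard cap.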
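The rename touches the same run prefix in two places (the ModelCheckpoint pattern and the epoch-00 save_weights call), and the stale 'lpcnet32v' load on the removed line shows how easily those names drift apart. A hedged sketch of deriving both from one variable; the prefix variable is hypothetical and not in the patch:

from tensorflow.keras.callbacks import ModelCheckpoint

# Hypothetical refactor: one source of truth for the run name so the
# checkpoint pattern and the initial weights dump cannot diverge.
prefix = 'lpcnet33_384'

checkpoint = ModelCheckpoint(prefix + '_{epoch:02d}.h5')
# ... build and compile the model as in the script, then:
# model.save_weights(prefix + '_00.h5')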