ref: a263f7c1f5ae1f91fc753943ddeb4d89da973810
parent: beaa370a7f9feff05a5fc711580e819ffcad4a59
author: David Rowe <david@rowetel.com>
date: Sun Oct 14 08:36:56 EDT 2018
Adding comments
--- a/dnn/train_wavenet_audio.py
+++ b/dnn/train_wavenet_audio.py
@@ -1,4 +1,8 @@
#!/usr/bin/python3
+# train_wavenet_audio.py
+# Jean-Marc Valin
+#
+# Train a CELPNet model (note: not a WaveNet model, despite the file name)
import wavenet
import lpcnet
@@ -13,12 +17,20 @@
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
+
+# use this option to cap the fraction of GPU memory this process
+# reserves, e.g. so more than one job can run at a time. Best to
+# disable on GPUs with little memory
config.gpu_options.per_process_gpu_memory_fraction = 0.44
+
set_session(tf.Session(config=config))
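+# (For reference, an alternative to a fixed fraction in TF1 is
+# on-demand allocation:
+#   config.gpu_options.allow_growth = True
+# which grows the allocation as needed instead of grabbing the
+# fraction up front)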
nb_epochs = 40
+
+# Try reducing batch_size if you run out of memory on your GPU
batch_size = 64
+# Note we are creating a CELPNet model (despite the new_wavernn_model name)
+
#model = wavenet.new_wavenet_model(fftnet=True)
model, _, _ = lpcnet.new_wavernn_model()
@@ -25,10 +37,10 @@
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()
-exc_file = sys.argv[1]
+exc_file = sys.argv[1] # not used at present
feature_file = sys.argv[2]
-pred_file = sys.argv[3]
-pcm_file = sys.argv[4]
+pred_file = sys.argv[3] # LPC predictor samples. Not used at present, see below
+pcm_file = sys.argv[4] # 16 bit signed short PCM samples
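+# usage sketch (file names are illustrative):
+#   ./train_wavenet_audio.py exc.s16 features.f32 pred.s16 speech.s16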
frame_size = 160
nb_features = 55
nb_used_features = lpcnet.nb_used_features
@@ -35,6 +47,8 @@
feature_chunk_size = 15
pcm_chunk_size = frame_size*feature_chunk_size
+# u for unquantised: load 16 bit linear PCM samples and convert to 8 bit mu-law
+
udata = np.fromfile(pcm_file, dtype='int16')
data = lin2ulaw(udata)
nb_frames = len(data)//pcm_chunk_size
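+# (sketch of the mu-law companding lin2ulaw is assumed to implement,
+# with mu=255 and the 16 bit input scaled to +/-1; the exact scaling
+# and offset in lpcnet may differ:
+#   s = x/32768.
+#   u = 128 + np.round(127*np.sign(s)*np.log(1 + 255*np.abs(s))/np.log(256)))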
@@ -41,10 +55,17 @@
features = np.fromfile(feature_file, dtype='float32')
+# trim to a whole number of frames
data = data[:nb_frames*pcm_chunk_size]
udata = udata[:nb_frames*pcm_chunk_size]
features = features[:nb_frames*feature_chunk_size*nb_features]
+# Noise injection: the idea is that the real system is going to be
+# predicting samples based on previously predicted samples rather than
+# from the original. Since the previously predicted samples aren't
+# expected to be so good, I add noise to the training data. Exactly
+# how the noise is added makes a huge difference
+
in_data = np.concatenate([data[0:1], data[:-1]]);
noise = np.concatenate([np.zeros((len(data)*1//5)), np.random.randint(-3, 3, len(data)*1//5), np.random.randint(-2, 2, len(data)*1//5), np.random.randint(-1, 1, len(data)*2//5)])
in_data = in_data + noise
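+# note np.random.randint(low, high) samples the half-open interval
+# [low, high), so randint(-3, 3) above gives noise in -3..+2;
+# randint(-3, 4) would be needed for symmetric +/-3 noise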
@@ -52,9 +73,15 @@
features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
+# Note: the LPC predictor output is now calculated by the loop below;
+# this code was for an earlier version that implemented the prediction
+# filter in C
+
upred = np.fromfile(pred_file, dtype='int16')
upred = upred[:nb_frames*pcm_chunk_size]
+# Use 16th order LPC to generate LPC prediction output upred[] and (in
+# mu-law form) pred[]
+
pred_in = ulaw2lin(in_data)
for i in range(2, nb_frames*feature_chunk_size):
upred[i*frame_size:(i+1)*frame_size] = 0
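+# (sketch of what the loop is assumed to compute, with a[0..15] the
+# 16 LPC coefficients for frame i taken from features[]:
+#   upred[n] = sum_{k=1..16} a[k-1]*pred_in[n-k]
+# the sign convention depends on how the coefficients are stored)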
@@ -64,9 +91,13 @@
pred = lin2ulaw(upred)
-
in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
in_data = in_data.astype('uint8')
+# LPC residual, which is the difference between the input speech and
+# the predictor output. With a slight time shift this is also the
+# ideal excitation in_exc
+
out_data = lin2ulaw(udata-upred)
in_exc = np.concatenate([out_data[0:1], out_data[:-1]]);
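+# i.e. e[n] = x[n] - p[n], computed in the linear domain then mu-law
+# encoded; in_exc[n] = e[n-1], the residual delayed by one sample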
@@ -86,6 +117,7 @@
in_data = np.concatenate([in_data, pred], axis=-1)
+# dump models to disk as we go
checkpoint = ModelCheckpoint('wavenet5d0_{epoch:02d}.h5')
#model.load_weights('wavenet4f2_30.h5')
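+# (ModelCheckpoint here writes a full model file every epoch;
+# save_weights_only=True is a Keras option if only weights are wanted)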