shithub: opus

ref: 7c28191b60d761e61fe0a3262dd4d0fde040ca02
parent: 97dcf52a01c40d7e2b845bdd974a922c5c23d462
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Mon Oct 22 09:40:54 EDT 2018

Rename the current files to use the LPCNet name since they're no longer WaveNet

--- /dev/null
+++ b/dnn/test_lpcnet.py
@@ -1,0 +1,83 @@
+#!/usr/bin/python3
+
+import wavenet
+import lpcnet
+import sys
+import numpy as np
+from keras.optimizers import Adam
+from keras.callbacks import ModelCheckpoint
+from ulaw import ulaw2lin, lin2ulaw
+import keras.backend as K
+import h5py
+
+import tensorflow as tf
+from keras.backend.tensorflow_backend import set_session
+config = tf.ConfigProto()
+config.gpu_options.per_process_gpu_memory_fraction = 0.2
+set_session(tf.Session(config=config))
+
+nb_epochs = 40
+batch_size = 64
+
+#model = wavenet.new_wavenet_model(fftnet=True)
+model, enc, dec = lpcnet.new_wavernn_model()
+
+model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
+#model.summary()
+
+feature_file = sys.argv[1]
+frame_size = 160
+nb_features = 55
+nb_used_features = lpcnet.nb_used_features
+
+features = np.fromfile(feature_file, dtype='float32')
+features = np.resize(features, (-1, nb_features))
+nb_frames = 1
+feature_chunk_size = features.shape[0]
+pcm_chunk_size = frame_size*feature_chunk_size
+
+features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
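+# columns 18-35 of the feature matrix are zeroed and column 36 is mapped to
+# an integer period that becomes the encoder's second input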
+features[:,:,18:36] = 0
+periods = (50*features[:,:,36:37]+100).astype('int16')
+
+
+
+model.load_weights('lpcnet9_384_10_G16_120.h5')
+
+order = 16
+
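+# buffers for the synthesised PCM and the decoder's sample-domain inputs;
+# the decoder runs one sample at a time, so its two recurrent states are
+# carried explicitly between predict() calls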
+pcm = np.zeros((nb_frames*pcm_chunk_size, ))
+fexc = np.zeros((1, 1, 2), dtype='float32')
+iexc = np.zeros((1, 1, 1), dtype='int16')
+state1 = np.zeros((1, lpcnet.rnn_units1), dtype='float32')
+state2 = np.zeros((1, lpcnet.rnn_units2), dtype='float32')
+
+mem = 0
+coef = 0.85
+
+skip = order + 1
+for c in range(0, nb_frames):
+    cfeat = enc.predict([features[c:c+1, :, :nb_used_features], periods[c:c+1, :, :]])
+    for fr in range(0, feature_chunk_size):
+        f = c*feature_chunk_size + fr
+        a = features[c, fr, nb_features-order:]
+        for i in range(skip, frame_size):
+            pred = -sum(a*pcm[f*frame_size + i - 1:f*frame_size + i - order-1:-1])
+            fexc[0, 0, 1] = lin2ulaw(pred)
+
+            p, state1, state2 = dec.predict([fexc, iexc, cfeat[:, fr:fr+1, :], state1, state2])
+            #Lower the temperature for voiced frames to reduce noisiness
+            p *= np.power(p, np.maximum(0, 1.5*features[c, fr, 37] - .5))
+            p = p/(1e-18 + np.sum(p))
+            #Cut off the tail of the remaining distribution
+            p = np.maximum(p-0.002, 0).astype('float64')
+            p = p/(1e-8 + np.sum(p))
+
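+            # sample the excitation index from the shaped distribution, rebuild
+            # the linear PCM sample as prediction + excitation, feed it back as
+            # the next signal input, and run a first-order IIR (mem) whose
+            # output is printed one sample per line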
+            iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
+            pcm[f*frame_size + i] = pred + ulaw2lin(iexc[0, 0, 0])
+            fexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i])
+            mem = coef*mem + pcm[f*frame_size + i]
+            print(mem)
+        skip = 0
+
+
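Both scripts lean on the ulaw helpers imported at the top. For reference, here is a minimal sketch of standard 8-bit mu-law companding, assuming 256 levels centred on 128; the exact scaling in the repository's ulaw.py may differ slightly, so treat this as illustrative rather than a drop-in replacement.

import numpy as np

def lin2ulaw_sketch(x, mu=255.0, scale=32768.0):
    # linear 16-bit PCM -> mu-law level in [0, 255], with 128 meaning silence
    s = np.clip(np.asarray(x, dtype='float64') / scale, -1.0, 1.0)
    u = np.sign(s) * np.log1p(mu * np.abs(s)) / np.log1p(mu)
    return np.round(128 + 127 * u)

def ulaw2lin_sketch(u, mu=255.0, scale=32768.0):
    # mu-law level -> linear PCM, the approximate inverse of the function above
    y = (np.asarray(u, dtype='float64') - 128.0) / 127.0
    return scale * np.sign(y) * (np.power(1.0 + mu, np.abs(y)) - 1.0) / mu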
--- a/dnn/test_wavenet_audio.py
+++ /dev/null
@@ -1,83 +1,0 @@
-#!/usr/bin/python3
-
-import wavenet
-import lpcnet
-import sys
-import numpy as np
-from keras.optimizers import Adam
-from keras.callbacks import ModelCheckpoint
-from ulaw import ulaw2lin, lin2ulaw
-import keras.backend as K
-import h5py
-
-import tensorflow as tf
-from keras.backend.tensorflow_backend import set_session
-config = tf.ConfigProto()
-config.gpu_options.per_process_gpu_memory_fraction = 0.2
-set_session(tf.Session(config=config))
-
-nb_epochs = 40
-batch_size = 64
-
-#model = wavenet.new_wavenet_model(fftnet=True)
-model, enc, dec = lpcnet.new_wavernn_model()
-
-model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-#model.summary()
-
-feature_file = sys.argv[1]
-frame_size = 160
-nb_features = 55
-nb_used_features = lpcnet.nb_used_features
-
-features = np.fromfile(feature_file, dtype='float32')
-features = np.resize(features, (-1, nb_features))
-nb_frames = 1
-feature_chunk_size = features.shape[0]
-pcm_chunk_size = frame_size*feature_chunk_size
-
-features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
-features[:,:,18:36] = 0
-periods = (50*features[:,:,36:37]+100).astype('int16')
-
-
-
-model.load_weights('lpcnet9_384_10_G16_120.h5')
-
-order = 16
-
-pcm = np.zeros((nb_frames*pcm_chunk_size, ))
-fexc = np.zeros((1, 1, 2), dtype='float32')
-iexc = np.zeros((1, 1, 1), dtype='int16')
-state1 = np.zeros((1, lpcnet.rnn_units1), dtype='float32')
-state2 = np.zeros((1, lpcnet.rnn_units2), dtype='float32')
-
-mem = 0
-coef = 0.85
-
-skip = order + 1
-for c in range(0, nb_frames):
-    cfeat = enc.predict([features[c:c+1, :, :nb_used_features], periods[c:c+1, :, :]])
-    for fr in range(0, feature_chunk_size):
-        f = c*feature_chunk_size + fr
-        a = features[c, fr, nb_features-order:]
-        for i in range(skip, frame_size):
-            pred = -sum(a*pcm[f*frame_size + i - 1:f*frame_size + i - order-1:-1])
-            fexc[0, 0, 1] = lin2ulaw(pred)
-
-            p, state1, state2 = dec.predict([fexc, iexc, cfeat[:, fr:fr+1, :], state1, state2])
-            #Lower the temperature for voiced frames to reduce noisiness
-            p *= np.power(p, np.maximum(0, 1.5*features[c, fr, 37] - .5))
-            p = p/(1e-18 + np.sum(p))
-            #Cut off the tail of the remaining distribution
-            p = np.maximum(p-0.002, 0).astype('float64')
-            p = p/(1e-8 + np.sum(p))
-
-            iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
-            pcm[f*frame_size + i] = pred + ulaw2lin(iexc[0, 0, 0])
-            fexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i])
-            mem = coef*mem + pcm[f*frame_size + i]
-            print(mem)
-        skip = 0
-
-
--- /dev/null
+++ b/dnn/train_lpcnet.py
@@ -1,0 +1,127 @@
+#!/usr/bin/python3
+# train_lpcnet.py
+# Jean-Marc Valin
+#
+# Train a CELPNet model (note not a Wavenet model)
+
+import wavenet
+import lpcnet
+import sys
+import numpy as np
+from keras.optimizers import Adam
+from keras.callbacks import ModelCheckpoint
+from ulaw import ulaw2lin, lin2ulaw
+import keras.backend as K
+import h5py
+
+import tensorflow as tf
+from keras.backend.tensorflow_backend import set_session
+config = tf.ConfigProto()
+
+# use this option to reserve GPU memory, e.g. for running more than
+# one thing at a time.  Best to disable for GPUs with small memory
+config.gpu_options.per_process_gpu_memory_fraction = 0.44
+
+set_session(tf.Session(config=config))
+
+nb_epochs = 40
+
+# Try reducing batch_size if you run out of memory on your GPU
+batch_size = 64
+
+# Note we are creating a CELPNet model
+
+#model = wavenet.new_wavenet_model(fftnet=True)
+model, _, _ = lpcnet.new_wavernn_model()
+
+model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
+model.summary()
+
+exc_file = sys.argv[1]     # not used at present
+feature_file = sys.argv[2]
+pred_file = sys.argv[3]    # LPC predictor samples. Not used at present, see below
+pcm_file = sys.argv[4]     # 16 bit signed short PCM samples
+frame_size = 160
+nb_features = 55
+nb_used_features = lpcnet.nb_used_features
+feature_chunk_size = 15
+pcm_chunk_size = frame_size*feature_chunk_size
+
+# u for unquantised, load 16 bit PCM samples and convert to mu-law
+
+udata = np.fromfile(pcm_file, dtype='int16')
+data = lin2ulaw(udata)
+nb_frames = len(data)//pcm_chunk_size
+
+features = np.fromfile(feature_file, dtype='float32')
+
+# limit to discrete number of frames
+data = data[:nb_frames*pcm_chunk_size]
+udata = udata[:nb_frames*pcm_chunk_size]
+features = features[:nb_frames*feature_chunk_size*nb_features]
+
+# Noise injection: the idea is that the real system is going to be
+# predicting samples based on previously predicted samples rather than
+# from the original. Since the previously predicted samples aren't
+# expected to be so good, I add noise to the training data.  Exactly
+# how the noise is added makes a huge difference
+
+in_data = np.concatenate([data[0:1], data[:-1]]);
+noise = np.concatenate([np.zeros((len(data)*1//5)), np.random.randint(-3, 3, len(data)*1//5), np.random.randint(-2, 2, len(data)*1//5), np.random.randint(-1, 1, len(data)*2//5)])
+#noise = np.round(np.concatenate([np.zeros((len(data)*1//5)), np.random.laplace(0, 1.2, len(data)*1//5), np.random.laplace(0, .77, len(data)*1//5), np.random.laplace(0, .33, len(data)*1//5), np.random.randint(-1, 1, len(data)*1//5)]))
+in_data = in_data + noise
+in_data = np.clip(in_data, 0, 255)
+
+features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
+
+# Note: the LPC predictor output is now calculated by the loop below, this code was
+# for an earlier version that implemented the prediction filter in C
+
+upred = np.fromfile(pred_file, dtype='int16')
+upred = upred[:nb_frames*pcm_chunk_size]
+
+# Use 16th order LPC to generate LPC prediction output upred[] and (in
+# mu-law form) pred[]
+
+pred_in = ulaw2lin(in_data)
+for i in range(2, nb_frames*feature_chunk_size):
+    upred[i*frame_size:(i+1)*frame_size] = 0
+    for k in range(16):
+        upred[i*frame_size:(i+1)*frame_size] = upred[i*frame_size:(i+1)*frame_size] - \
+            pred_in[i*frame_size-k:(i+1)*frame_size-k]*features[i, nb_features-16+k]
+
+pred = lin2ulaw(upred)
+
+in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
+in_data = in_data.astype('uint8')
+
+# LPC residual, which is the difference between the input speech and
+# the predictor output, with a slight time shift this is also the
+# ideal excitation in_exc
+
+out_data = lin2ulaw(udata-upred)
+in_exc = np.concatenate([out_data[0:1], out_data[:-1]]);
+
+out_data = np.reshape(out_data, (nb_frames, pcm_chunk_size, 1))
+out_data = out_data.astype('uint8')
+
+in_exc = np.reshape(in_exc, (nb_frames, pcm_chunk_size, 1))
+in_exc = in_exc.astype('uint8')
+
+
+features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
+features = features[:, :, :nb_used_features]
+features[:,:,18:36] = 0
+pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
+pred = pred.astype('uint8')
+
+periods = (50*features[:,:,36:37]+100).astype('int16')
+
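+# the sample-rate input to the network has two channels: the (noisy) previous
+# signal sample and the current LPC prediction, both as 8-bit mu-law values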
+in_data = np.concatenate([in_data, pred], axis=-1)
+
+# dump models to disk as we go
+checkpoint = ModelCheckpoint('lpcnet9_384_10_G16_{epoch:02d}.h5')
+
+#model.load_weights('wavenet4f2_30.h5')
+model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
+model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=120, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, 0.1)])
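The training targets above and the synthesis loop in test_lpcnet.py rely on the same LPC identity: the excitation is the signal minus the short-term prediction, so adding the excitation back to a prediction computed from past samples reconstructs the waveform. Below is a small self-contained numpy check of that round trip, using a toy signal and a made-up fixed coefficient vector instead of the per-frame LPCs the real scripts read from the feature file.

import numpy as np

np.random.seed(0)
order = 16
lpc = np.random.uniform(-0.1, 0.1, order)      # toy coefficients (illustrative only)
sig = np.random.randn(320)                      # toy "speech"

# analysis, as in the training loop: p[n] = -sum_k a_k * s[n-k]
pred = np.zeros_like(sig)
for n in range(order + 1, len(sig)):
    pred[n] = -np.dot(lpc, sig[n-1:n-order-1:-1])
exc = sig - pred                                # residual = the network's target

# synthesis, as in test_lpcnet.py: a prediction from past *output* samples
# plus the excitation recovers the signal once the filter memory is primed
out = np.zeros_like(sig)
out[:order + 1] = sig[:order + 1]
for n in range(order + 1, len(sig)):
    out[n] = -np.dot(lpc, out[n-1:n-order-1:-1]) + exc[n]
assert np.allclose(out, sig)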
--- a/dnn/train_wavenet_audio.py
+++ /dev/null
@@ -1,127 +1,0 @@
-#!/usr/bin/python3
-# train_wavenet_audio.py
-# Jean-Marc Valin
-#
-# Train a CELPNet model (note not a Wavenet model)
-
-import wavenet
-import lpcnet
-import sys
-import numpy as np
-from keras.optimizers import Adam
-from keras.callbacks import ModelCheckpoint
-from ulaw import ulaw2lin, lin2ulaw
-import keras.backend as K
-import h5py
-
-import tensorflow as tf
-from keras.backend.tensorflow_backend import set_session
-config = tf.ConfigProto()
-
-# use this option to reserve GPU memory, e.g. for running more than
-# one thing at a time.  Best to disable for GPUs with small memory
-config.gpu_options.per_process_gpu_memory_fraction = 0.44
-
-set_session(tf.Session(config=config))
-
-nb_epochs = 40
-
-# Try reducing batch_size if you run out of memory on your GPU
-batch_size = 64
-
-# Note we are creating a CELPNet model
-
-#model = wavenet.new_wavenet_model(fftnet=True)
-model, _, _ = lpcnet.new_wavernn_model()
-
-model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-model.summary()
-
-exc_file = sys.argv[1]     # not used at present
-feature_file = sys.argv[2]
-pred_file = sys.argv[3]    # LPC predictor samples. Not used at present, see below
-pcm_file = sys.argv[4]     # 16 bit signed short PCM samples
-frame_size = 160
-nb_features = 55
-nb_used_features = lpcnet.nb_used_features
-feature_chunk_size = 15
-pcm_chunk_size = frame_size*feature_chunk_size
-
-# u for unquantised, load 16 bit PCM samples and convert to mu-law
-
-udata = np.fromfile(pcm_file, dtype='int16')
-data = lin2ulaw(udata)
-nb_frames = len(data)//pcm_chunk_size
-
-features = np.fromfile(feature_file, dtype='float32')
-
-# limit to discrete number of frames
-data = data[:nb_frames*pcm_chunk_size]
-udata = udata[:nb_frames*pcm_chunk_size]
-features = features[:nb_frames*feature_chunk_size*nb_features]
-
-# Noise injection: the idea is that the real system is going to be
-# predicting samples based on previously predicted samples rather than
-# from the original. Since the previously predicted samples aren't
-# expected to be so good, I add noise to the training data.  Exactly
-# how the noise is added makes a huge difference
-
-in_data = np.concatenate([data[0:1], data[:-1]]);
-noise = np.concatenate([np.zeros((len(data)*1//5)), np.random.randint(-3, 3, len(data)*1//5), np.random.randint(-2, 2, len(data)*1//5), np.random.randint(-1, 1, len(data)*2//5)])
-#noise = np.round(np.concatenate([np.zeros((len(data)*1//5)), np.random.laplace(0, 1.2, len(data)*1//5), np.random.laplace(0, .77, len(data)*1//5), np.random.laplace(0, .33, len(data)*1//5), np.random.randint(-1, 1, len(data)*1//5)]))
-in_data = in_data + noise
-in_data = np.clip(in_data, 0, 255)
-
-features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
-
-# Note: the LPC predictor output is now calculated by the loop below, this code was
-# for an earlier version that implemented the prediction filter in C
-
-upred = np.fromfile(pred_file, dtype='int16')
-upred = upred[:nb_frames*pcm_chunk_size]
-
-# Use 16th order LPC to generate LPC prediction output upred[] and (in
-# mu-law form) pred[]
-
-pred_in = ulaw2lin(in_data)
-for i in range(2, nb_frames*feature_chunk_size):
-    upred[i*frame_size:(i+1)*frame_size] = 0
-    for k in range(16):
-        upred[i*frame_size:(i+1)*frame_size] = upred[i*frame_size:(i+1)*frame_size] - \
-            pred_in[i*frame_size-k:(i+1)*frame_size-k]*features[i, nb_features-16+k]
-
-pred = lin2ulaw(upred)
-
-in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
-in_data = in_data.astype('uint8')
-
-# LPC residual, which is the difference between the input speech and
-# the predictor output, with a slight time shift this is also the
-# ideal excitation in_exc
-
-out_data = lin2ulaw(udata-upred)
-in_exc = np.concatenate([out_data[0:1], out_data[:-1]]);
-
-out_data = np.reshape(out_data, (nb_frames, pcm_chunk_size, 1))
-out_data = out_data.astype('uint8')
-
-in_exc = np.reshape(in_exc, (nb_frames, pcm_chunk_size, 1))
-in_exc = in_exc.astype('uint8')
-
-
-features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
-features = features[:, :, :nb_used_features]
-features[:,:,18:36] = 0
-pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
-pred = pred.astype('uint8')
-
-periods = (50*features[:,:,36:37]+100).astype('int16')
-
-in_data = np.concatenate([in_data, pred], axis=-1)
-
-# dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet9_384_10_G16_{epoch:02d}.h5')
-
-#model.load_weights('wavenet4f2_30.h5')
-model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-model.fit([in_data, in_exc, features, periods], out_data, batch_size=batch_size, epochs=120, validation_split=0.0, callbacks=[checkpoint, lpcnet.Sparsify(2000, 40000, 400, 0.1)])
--