shithub: opus

ref: 785a2b2e84f955dd3013536b35529bb9fa1afb6b
parent: 3d20cdaed4f2303ce8b50885cabe07c39fd22025
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Thu Aug 16 09:58:33 EDT 2018

Predicting pre-emphasized audio
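
The subject line refers to pre-emphasis: the network is trained to predict a first-difference-filtered signal x[n] - a*x[n-1], with the inverse one-pole filter applied at synthesis time. A minimal numpy sketch of that filter pair; the coefficient value 0.85 is an assumption for illustration and is not part of this patch:

import numpy as np

def preemphasis(x, a=0.85):
    # y[n] = x[n] - a*x[n-1]; a=0.85 is an assumed coefficient, not from this diff
    y = np.copy(x)
    y[1:] -= a * x[:-1]
    return y

def deemphasis(y, a=0.85):
    # inverse one-pole filter: out[n] = y[n] + a*out[n-1]
    out = np.empty_like(y)
    mem = 0.0
    for n in range(len(y)):
        mem = y[n] + a * mem
        out[n] = mem
    return out

# round trip: deemphasis(preemphasis(x)) recovers x up to float rounding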

--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -10,7 +10,7 @@
 import h5py
 import sys
 
-rnn_units=64
+rnn_units=512
 pcm_bits = 8
 pcm_levels = 2**pcm_bits
 nb_used_features = 38
@@ -41,7 +41,7 @@
         }
 
 def new_wavernn_model():
-    pcm = Input(shape=(None, 2))
+    pcm = Input(shape=(None, 1))
     pitch = Input(shape=(None, 1))
     feat = Input(shape=(None, nb_used_features))
     dec_feat = Input(shape=(None, 32))
@@ -61,7 +61,7 @@
         cpitch = pitch
 
     embed = Embedding(256, 128, embeddings_initializer=PCMInit())
-    cpcm = Reshape((-1, 128*2))(embed(pcm))
+    cpcm = Reshape((-1, 128))(embed(pcm))
 
 
     cfeat = fconv2(fconv1(feat))
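
With the pcm input reduced from two samples per step to one, the embedding output flattens to 128 features per step instead of 256, which is what the Reshape change above reflects. A shape-check sketch, assuming the tf.keras API and omitting the project's PCMInit initializer:

from tensorflow.keras.layers import Input, Embedding, Reshape

pcm = Input(shape=(None, 1), dtype='int32')  # one u-law sample per step
x = Embedding(256, 128)(pcm)                 # -> (batch, time, 1, 128)
cpcm = Reshape((-1, 128))(x)                 # -> (batch, time, 128)
print(cpcm.shape)                            # (None, None, 128)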
--- a/dnn/test_wavenet_audio.py
+++ b/dnn/test_wavenet_audio.py
@@ -23,18 +23,18 @@
 model, enc, dec = lpcnet.new_wavernn_model()
 
 model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-model.summary()
+#model.summary()
 
 pcmfile = sys.argv[1]
 feature_file = sys.argv[2]
 frame_size = 160
-nb_features = 54
+nb_features = 55
 nb_used_features = wavenet.nb_used_features
 feature_chunk_size = 15
 pcm_chunk_size = frame_size*feature_chunk_size
 
 data = np.fromfile(pcmfile, dtype='int16')
-data = np.minimum(127, lin2ulaw(data[80:]/32768.))
+data = np.minimum(127, lin2ulaw(data/32768.))
 nb_frames = len(data)//pcm_chunk_size
 
 features = np.fromfile(feature_file, dtype='float32')
@@ -66,7 +66,7 @@
 out_data = np.reshape(data, (nb_frames*pcm_chunk_size, 1))
 
 
-model.load_weights('wavenet3g_30.h5')
+model.load_weights('wavenet3h12_30.h5')
 
 order = 16
 
@@ -92,7 +92,7 @@
             #fexc[0, 0, 0] = in_data[f*frame_size + i, 0]
             #print(cfeat.shape)
             p, state = dec.predict([fexc, cfeat[:, fr:fr+1, :], state])
-            #p = np.maximum(p-0.003, 0)
+            p = np.maximum(p-0.0003, 0)
             p = p/(1e-5 + np.sum(p))
             #print(np.sum(p))
             iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))-128
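
The newly enabled line subtracts a small floor from the network's output distribution before sampling, zeroing the low-probability tail so unlikely u-law levels cannot be drawn. A standalone sketch of that sampling step, with the (1, 1, 256) distribution shape assumed from the surrounding code:

import numpy as np

def sample_ulaw(p, floor=0.0003):
    p = np.maximum(p - floor, 0)        # drop the low-probability tail
    p = p / (1e-5 + np.sum(p))          # renormalize; 1e-5 guards an all-zero p
    return np.argmax(np.random.multinomial(1, p[0, 0, :], 1)) - 128

p = np.random.dirichlet(np.ones(256)).reshape(1, 1, 256)  # stand-in for dec.predict output
print(sample_ulaw(p))                   # signed u-law sample in [-128, 127]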
--- a/dnn/train_wavenet_audio.py
+++ b/dnn/train_wavenet_audio.py
@@ -30,7 +30,7 @@
 pred_file = sys.argv[3]
 pcm_file = sys.argv[4]
 frame_size = 160
-nb_features = 54
+nb_features = 55
 nb_used_features = wavenet.nb_used_features
 feature_chunk_size = 15
 pcm_chunk_size = frame_size*feature_chunk_size
@@ -72,7 +72,7 @@
 pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
 pred = (pred.astype('int16')+128).astype('uint8')
 
-in_data = np.concatenate([in_data, pred], axis=-1)
+#in_data = np.concatenate([in_data, pred], axis=-1)
 
 #in_data = np.concatenate([in_data, in_pitch], axis=-1)
 
@@ -80,7 +80,7 @@
 # f.create_dataset('data', data=in_data[:50000, :, :])
 # f.create_dataset('feat', data=features[:50000, :, :])
 
-checkpoint = ModelCheckpoint('wavenet3h9_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('wavenet3h13_{epoch:02d}.h5')
 
 #model.load_weights('wavernn1c_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=2e-4), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
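
Both scripts now expect 55 float32 values per 160-sample frame in the feature file, of which the first nb_used_features (38) feed the network. A hedged sketch of that layout; the file path and the slice to 38 features are illustrative assumptions, not code from this patch:

import numpy as np

nb_features = 55          # total floats stored per 160-sample frame
nb_used_features = 38     # subset the network consumes
feature_chunk_size = 15   # frames per training chunk

features = np.fromfile('features.f32', dtype='float32')   # illustrative path
nb_frames = len(features) // (feature_chunk_size * nb_features)
features = features[:nb_frames * feature_chunk_size * nb_features]
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
used = features[:, :, :nb_used_features]                  # assumed slice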
--