shithub: opus

ref: c381db5688af77807561d3f525ef449c0bc0a2a3
parent: 8f6e490ba20b0c0dbf1e81cc2d52a7c8ecf5daca
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Thu Aug 23 20:20:10 EDT 2018

Use excitation as input

--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -12,6 +12,7 @@
 
 rnn_units=512
 pcm_bits = 8
+embed_size = 128
 pcm_levels = 2**pcm_bits
 nb_used_features = 38
 
@@ -42,6 +43,7 @@
 
 def new_wavernn_model():
     pcm = Input(shape=(None, 2))
+    exc = Input(shape=(None, 1))
     pitch = Input(shape=(None, 1))
     feat = Input(shape=(None, nb_used_features))
     dec_feat = Input(shape=(None, 32))
@@ -60,26 +62,27 @@
         cpcm = pcm
         cpitch = pitch
 
-    embed = Embedding(256, 128, embeddings_initializer=PCMInit())
-    cpcm = Reshape((-1, 128*2))(embed(pcm))
+    embed = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+    cpcm = Reshape((-1, embed_size*2))(embed(pcm))
+    embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+    cexc = Reshape((-1, embed_size))(embed2(exc))
 
-
     cfeat = fconv2(fconv1(feat))
 
     rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))
 
     rnn = CuDNNGRU(rnn_units, return_sequences=True, return_state=True)
-    rnn_in = Concatenate()([cpcm, rep(cfeat)])
+    rnn_in = Concatenate()([cpcm, cexc, rep(cfeat)])
     md = MDense(pcm_levels, activation='softmax')
     gru_out, state = rnn(rnn_in)
     ulaw_prob = md(gru_out)
     
-    model = Model([pcm, feat], ulaw_prob)
+    model = Model([pcm, exc, feat], ulaw_prob)
     encoder = Model(feat, cfeat)
     
-    dec_rnn_in = Concatenate()([cpcm, dec_feat])
+    dec_rnn_in = Concatenate()([cpcm, cexc, dec_feat])
     dec_gru_out, state = rnn(dec_rnn_in, initial_state=dec_state)
     dec_ulaw_prob = md(dec_gru_out)
 
-    decoder = Model([pcm, dec_feat, dec_state], [dec_ulaw_prob, state])
+    decoder = Model([pcm, exc, dec_feat, dec_state], [dec_ulaw_prob, state])
     return model, encoder, decoder
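
The lpcnet.py hunk adds a third network input: the previous excitation
sample, run through its own 128-wide embedding (embed2) and concatenated
with the embedded pcm history and the frame features ahead of the GRU.
Below is a minimal sketch of that wiring in plain Keras; it is an
illustration, not the file itself. The PCMInit initializer, the
fconv1/fconv2 feature encoder, the 160x feature repetition, CuDNNGRU,
MDense and the separate encoder/decoder models are omitted or replaced
by stock layers, and the frame features are assumed to be already
upsampled to the sample rate.

    from keras.models import Model
    from keras.layers import Input, Embedding, Reshape, Concatenate, GRU, Dense

    embed_size = 128
    pcm_levels = 256
    nb_used_features = 38

    # pcm carries two u-law indices per step (previous sample and prediction),
    # the new exc input carries one (previous excitation).
    pcm  = Input(shape=(None, 2))
    exc  = Input(shape=(None, 1))
    feat = Input(shape=(None, nb_used_features))

    # Separate embedding tables for signal and excitation; the reshape widths
    # differ because pcm holds two symbols per step and exc only one.
    cpcm = Reshape((-1, embed_size*2))(Embedding(pcm_levels, embed_size)(pcm))
    cexc = Reshape((-1, embed_size))(Embedding(pcm_levels, embed_size)(exc))

    # The embedded excitation is simply concatenated with the embedded pcm
    # and the conditioning features before the recurrent layer.
    rnn_in = Concatenate()([cpcm, cexc, feat])
    gru_out = GRU(512, return_sequences=True)(rnn_in)
    ulaw_prob = Dense(pcm_levels, activation='softmax')(gru_out)

    model = Model([pcm, exc, feat], ulaw_prob)
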
--- a/dnn/test_wavenet_audio.py
+++ b/dnn/test_wavenet_audio.py
@@ -66,7 +66,7 @@
 out_data = np.reshape(data, (nb_frames*pcm_chunk_size, 1))
 
 
-model.load_weights('wavenet4a3_30.h5')
+model.load_weights('wavenet4b_30.h5')
 
 order = 16
 
@@ -85,19 +85,19 @@
         period = int(50*features[c, fr, 36]+100)
         period = period - 4
         for i in range(frame_size):
-            fexc[0, 0, 0] = iexc + 128
+            #fexc[0, 0, 0] = iexc + 128
             pred = -sum(a*pcm[f*frame_size + i - 1:f*frame_size + i - order-1:-1, 0])
             fexc[0, 0, 1] = np.minimum(127, lin2ulaw(pred/32768.)) + 128
 
-            p, state = dec.predict([fexc, cfeat[:, fr:fr+1, :], state])
+            p, state = dec.predict([fexc, iexc, cfeat[:, fr:fr+1, :], state])
             #p = p*p
             #p = p/(1e-18 + np.sum(p))
             p = np.maximum(p-0.001, 0)
             p = p/(1e-5 + np.sum(p))
 
-            iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))-128
-            pcm[f*frame_size + i, 0] = pred + 32768*ulaw2lin(iexc[0, 0, 0]*1.0)
-            iexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i, 0]/32768)
+            iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0,0,:], 1))
+            pcm[f*frame_size + i, 0] = pred + 32768*ulaw2lin(iexc[0, 0, 0]-128)
+            fexc[0, 0, 0] = lin2ulaw(pcm[f*frame_size + i, 0]/32768) + 128
             print(iexc[0, 0, 0], 32768*ulaw2lin(out_data[f*frame_size + i, 0]), pcm[f*frame_size + i, 0], pred)
 
 
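In the synthesis loop of test_wavenet_audio.py, iexc now stays a raw
0..255 u-law index (the -128 offset is gone) so it can be fed directly
to the new excitation embedding, while the re-encoded reconstructed
sample moves into fexc[0, 0, 0]. The sketch below wraps one sampling
step in a hypothetical helper, sample_step(); dec, fexc, iexc, cfeat
and state are the arrays from the surrounding script, and lin2ulaw /
ulaw2lin are assumed to come from dnn/ulaw.py.

    import numpy as np
    from ulaw import ulaw2lin, lin2ulaw   # assumed location: dnn/ulaw.py

    def sample_step(dec, fexc, iexc, cfeat_fr, state, pred):
        # Encode the LPC prediction into the second pcm channel, as before.
        fexc[0, 0, 1] = np.minimum(127, lin2ulaw(pred / 32768.)) + 128

        # The decoder now also receives iexc, the previous excitation index.
        p, state = dec.predict([fexc, iexc, cfeat_fr, state])
        p = np.maximum(p - 0.001, 0)   # prune tiny probabilities
        p = p / (1e-5 + np.sum(p))     # renormalise

        # Sample a u-law index in 0..255 and keep the raw index in iexc,
        # since that is what the excitation embedding expects.
        iexc[0, 0, 0] = np.argmax(np.random.multinomial(1, p[0, 0, :], 1))

        # Convert to a signed u-law value only to reconstruct the sample.
        sample = pred + 32768 * ulaw2lin(iexc[0, 0, 0] - 128)

        # Re-encode the reconstructed sample (offset into 0..255) as the
        # first pcm channel for the next step.
        fexc[0, 0, 0] = lin2ulaw(sample / 32768) + 128
        return sample, state

Inside the loop this corresponds roughly to
pcm[f*frame_size + i, 0], state = sample_step(dec, fexc, iexc, cfeat[:, fr:fr+1, :], state, pred).
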
--- a/dnn/train_wavenet_audio.py
+++ b/dnn/train_wavenet_audio.py
@@ -46,7 +46,7 @@
 features = features[:nb_frames*feature_chunk_size*nb_features]
 
 in_data = np.concatenate([data[0:1], data[:-1]]);
-noise = np.concatenate([np.zeros((len(data)*2//5)), np.random.randint(-2, 2, len(data)//5), np.random.randint(-1, 1, len(data)*2//5)])
+noise = np.concatenate([np.zeros((len(data)*2//5)), np.random.randint(-1, 1, len(data)*3//5)])
 in_data = in_data + noise
 in_data = np.maximum(-127, np.minimum(127, in_data))
 
@@ -78,9 +78,18 @@
 
 in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
 in_data = (in_data.astype('int16')+128).astype('uint8')
-out_data = np.reshape(lin2ulaw((udata-upred)/32768), (nb_frames, pcm_chunk_size, 1))
+out_data = lin2ulaw((udata-upred)/32768)
+in_exc = np.concatenate([out_data[0:1], out_data[:-1]]);
+
+out_data = np.reshape(out_data, (nb_frames, pcm_chunk_size, 1))
 out_data = np.maximum(-127, np.minimum(127, out_data))
 out_data = (out_data.astype('int16')+128).astype('uint8')
+
+in_exc = np.reshape(in_exc, (nb_frames, pcm_chunk_size, 1))
+in_exc = np.maximum(-127, np.minimum(127, in_exc))
+in_exc = (in_exc.astype('int16')+128).astype('uint8')
+
+
 features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
 features = features[:, :, :nb_used_features]
 pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
@@ -94,8 +103,8 @@
 # f.create_dataset('data', data=in_data[:50000, :, :])
 # f.create_dataset('feat', data=features[:50000, :, :])
 
-checkpoint = ModelCheckpoint('wavenet4a3_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('wavenet4b_{epoch:02d}.h5')
 
 #model.load_weights('wavernn1c_01.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=2e-4), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
-model.fit([in_data, features], out_data, batch_size=batch_size, epochs=30, validation_split=0.2, callbacks=[checkpoint])
+model.fit([in_data, in_exc, features], out_data, batch_size=batch_size, epochs=30, validation_split=0.2, callbacks=[checkpoint])
--
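
On the training side, train_wavenet_audio.py builds the matching
excitation input: the u-law excitation target lin2ulaw((udata-upred)/32768),
delayed by one sample, clipped to [-127, 127] and offset into 0..255 like
the other inputs, then passed to fit() as the second model input. (The
noise schedule also changes: the ±2 noise band is dropped, leaving zeros
on the first two fifths of the data and the narrower noise on the rest.)
A small self-contained sketch of the excitation preparation, with
make_exc_io as a hypothetical helper name:

    import numpy as np

    def make_exc_io(ulaw_exc, nb_frames, pcm_chunk_size):
        """ulaw_exc: 1-D array of signed u-law excitation values,
        i.e. lin2ulaw((udata - upred)/32768) from the training script."""
        # Teacher forcing: the excitation fed to the network at step t is
        # the excitation target from step t-1 (first sample repeated).
        in_exc = np.concatenate([ulaw_exc[0:1], ulaw_exc[:-1]])

        out_data = np.reshape(ulaw_exc, (nb_frames, pcm_chunk_size, 1))
        out_data = (np.clip(out_data, -127, 127).astype('int16') + 128).astype('uint8')

        in_exc = np.reshape(in_exc, (nb_frames, pcm_chunk_size, 1))
        in_exc = (np.clip(in_exc, -127, 127).astype('int16') + 128).astype('uint8')
        return in_exc, out_data

in_exc then joins the other inputs in the fit() call:
model.fit([in_data, in_exc, features], out_data, ...).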