ref: 3d20cdaed4f2303ce8b50885cabe07c39fd22025
parent: 4fec1144f3ff23e4740550f9201c25cf180dcfa4
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Tue Aug 14 14:40:32 EDT 2018
Add prediction
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -10,7 +10,7 @@
import h5py
import sys
-rnn_units=512
+rnn_units=64
pcm_bits = 8
pcm_levels = 2**pcm_bits
nb_used_features = 38
@@ -41,7 +41,7 @@
}
def new_wavernn_model():
- pcm = Input(shape=(None, 1))
+ pcm = Input(shape=(None, 2))
pitch = Input(shape=(None, 1))
feat = Input(shape=(None, nb_used_features))
dec_feat = Input(shape=(None, 32))
@@ -61,7 +61,7 @@
cpitch = pitch
embed = Embedding(256, 128, embeddings_initializer=PCMInit())
- cpcm = Reshape((-1, 128))(embed(pcm))
+ cpcm = Reshape((-1, 128*2))(embed(pcm))
cfeat = fconv2(fconv1(feat))
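
Note (not part of the patch): a minimal sketch of the shape change above, assuming standard Keras layers. Each timestep now carries two 8-bit values (last sample and prediction); each is mapped to a 128-dim embedding, and the pair is flattened to 256 features per step. PCMInit and the rest of the model are omitted here.

import numpy as np
from keras.layers import Input, Embedding, Reshape
from keras.models import Model

pcm = Input(shape=(None, 2))                      # (batch, time, 2): sample + prediction
embed = Embedding(256, 128)                       # PCMInit initializer omitted in this sketch
cpcm = Reshape((-1, 128*2))(embed(pcm))           # (batch, time, 2, 128) -> (batch, time, 256)
sketch = Model(pcm, cpcm)
print(sketch.predict(np.zeros((1, 10, 2))).shape) # (1, 10, 256)
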
--- a/dnn/train_wavenet_audio.py
+++ b/dnn/train_wavenet_audio.py
@@ -25,8 +25,10 @@
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.summary()
-pcmfile = sys.argv[1]
+exc_file = sys.argv[1]
feature_file = sys.argv[2]
+pred_file = sys.argv[3]
+pcm_file = sys.argv[4]
frame_size = 160
nb_features = 54
nb_used_features = wavenet.nb_used_features
@@ -33,8 +35,8 @@
feature_chunk_size = 15
pcm_chunk_size = frame_size*feature_chunk_size
-data = np.fromfile(pcmfile, dtype='int16')
-data = np.minimum(127, lin2ulaw(data[80:]/32768.))
+data = np.fromfile(pcm_file, dtype='int16')
+data = np.minimum(127, lin2ulaw(data/32768.))
nb_frames = len(data)//pcm_chunk_size
features = np.fromfile(feature_file, dtype='float32')
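
Note (not part of the patch): lin2ulaw is defined elsewhere in the repo; the hypothetical stand-in below only illustrates the companding step above, using the standard mu-law formula with mu=255. Input is scaled to [-1, 1], the output lands roughly in [-128, 128], and the script then clamps it to 127 and later offsets by +128 into uint8 range.

import numpy as np

def lin2ulaw(x, mu=255.0):
    # sign-preserving mu-law companding of a signal in [-1, 1]
    return np.sign(x) * 128.0 * np.log(1.0 + mu*np.abs(x)) / np.log(1.0 + mu)

data = np.minimum(127, lin2ulaw(np.array([-32768, 0, 32767])/32768.))
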
@@ -46,6 +48,13 @@
in_data = in_data + np.random.randint(-1, 1, len(data))
features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
+
+pred = np.fromfile(pred_file, dtype='int16')
+pred = pred[:nb_frames*pcm_chunk_size]
+pred = np.minimum(127, lin2ulaw(pred/32768.))
+pred = pred + np.random.randint(-1, 1, len(data))
+
+
pitch = 1.*data
pitch[:320] = 0
for i in range(2, nb_frames*feature_chunk_size):
@@ -60,7 +69,10 @@
out_data = (out_data.astype('int16')+128).astype('uint8')
features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
features = features[:, :, :nb_used_features]
+pred = np.reshape(pred, (nb_frames, pcm_chunk_size, 1))
+pred = (pred.astype('int16')+128).astype('uint8')
+in_data = np.concatenate([in_data, pred], axis=-1)
#in_data = np.concatenate([in_data, in_pitch], axis=-1)
@@ -68,7 +80,7 @@
# f.create_dataset('data', data=in_data[:50000, :, :])
# f.create_dataset('feat', data=features[:50000, :, :])
-checkpoint = ModelCheckpoint('wavenet3g_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('wavenet3h9_{epoch:02d}.h5')
#model.load_weights('wavernn1c_01.h5')
model.compile(optimizer=Adam(0.001, amsgrad=True, decay=2e-4), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
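
Note (not part of the patch, shapes assumed from the script above): after this change the model input has two channels per timestep. The mu-law signal and the mu-law prediction are each reshaped to (nb_frames, pcm_chunk_size, 1) uint8 arrays and stacked on the last axis, so every step feeds a [sample, prediction] pair matching Input(shape=(None, 2)).

import numpy as np

nb_frames, pcm_chunk_size = 4, 160*15                    # illustrative batch, 2400 samples/chunk
in_data = np.zeros((nb_frames, pcm_chunk_size, 1), dtype='uint8')
pred    = np.zeros((nb_frames, pcm_chunk_size, 1), dtype='uint8')
model_in = np.concatenate([in_data, pred], axis=-1)      # shape (4, 2400, 2)
assert model_in.shape[-1] == 2                           # matches Input(shape=(None, 2))
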
--