shithub: opus

ref: fd9002e98e804e388e0a150b7e5617282b29e876
parent: 617e462be36bba4f3a1a0fba4a212cb9dded3236
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Tue Jun 26 12:40:55 EDT 2018

Adding pitch: feed a pitch-delayed copy of the signal to the network as a second input channel and adjust the pitch analysis parameters.

--- a/dnn/denoise.c
+++ b/dnn/denoise.c
@@ -46,7 +46,7 @@
 #define WINDOW_SIZE (2*FRAME_SIZE)
 #define FREQ_SIZE (FRAME_SIZE + 1)
 
-#define PITCH_MIN_PERIOD 20
+#define PITCH_MIN_PERIOD 32
 #define PITCH_MAX_PERIOD 256
 #define PITCH_FRAME_SIZE 320
 #define PITCH_BUF_SIZE (PITCH_MAX_PERIOD+PITCH_FRAME_SIZE)
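At the 16 kHz sample rate this codebase works at (an assumption; the rate is not stated in the hunk itself), the new constants map to the following pitch search range, sketched in Python:

    # Minimal sketch of what the updated pitch constants imply, assuming 16 kHz audio.
    FS = 16000                                        # assumed sample rate in Hz
    PITCH_MIN_PERIOD = 32                             # raised from 20 by this patch
    PITCH_MAX_PERIOD = 256
    PITCH_FRAME_SIZE = 320
    PITCH_BUF_SIZE = PITCH_MAX_PERIOD + PITCH_FRAME_SIZE

    print("highest searchable pitch: %.1f Hz" % (FS / PITCH_MIN_PERIOD))   # 500.0 Hz (was 800 Hz)
    print("lowest searchable pitch:  %.1f Hz" % (FS / PITCH_MAX_PERIOD))   # 62.5 Hz
    print("pitch analysis buffer:    %d samples" % PITCH_BUF_SIZE)         # 576 samples

Raising PITCH_MIN_PERIOD therefore caps the highest pitch the search will report at 500 Hz instead of 800 Hz.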
@@ -321,7 +321,7 @@
     float g_1;
     _celt_autocorr(x, ac, NULL, 0, LPC_ORDER, WINDOW_SIZE);
     /* -40 dB noise floor. */
-    ac[0] += ac[0]*1e-4;
+    ac[0] += ac[0]*1e-4 + 320/12;
     /* Lag windowing. */
     for (i=1;i<LPC_ORDER+1;i++) ac[i] *= (1 - 6e-5*i*i);
     e = _celt_lpc(lpc, rc, ac, LPC_ORDER);
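The second hunk adds an absolute offset of 320/12 (integer arithmetic in C, i.e. 26) to the zero-lag autocorrelation on top of the existing relative -40 dB floor, before the lag windowing and the LPC solve. A standalone Python sketch of the same conditioning steps, with lpc_order standing in for the LPC_ORDER defined elsewhere in denoise.c:

    import numpy as np

    def condition_autocorr(ac, lpc_order=16):
        # lpc_order=16 is an assumption; LPC_ORDER is defined elsewhere in denoise.c.
        ac = np.asarray(ac, dtype=np.float64).copy()
        # Relative ~-40 dB floor plus the new absolute offset from this patch.
        # 320/12 is integer division (26) in the C code, hence // here.
        ac[0] += ac[0] * 1e-4 + 320 // 12
        # Lag windowing, mirroring the unchanged loop that follows in the C code.
        for i in range(1, lpc_order + 1):
            ac[i] *= (1 - 6e-5 * i * i)
        return ac

The absolute term acts as a fixed noise floor, which keeps the LPC solve well conditioned when the frame energy is very small.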
@@ -582,7 +582,6 @@
     preemphasis(x, &mem_preemph, x, PREEMPHASIS, FRAME_SIZE);
 
     compute_frame_features(st, iexc, X, P, Ex, Ep, Exp, features, x);
-    pitch_filter(X, P, Ex, Ep, Exp, g);
 #if 1
     fwrite(features, sizeof(float), NB_FEATURES, stdout);
     fwrite(iexc, sizeof(signed char), FRAME_SIZE, fexc);
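The same hunk drops the pitch_filter() call but keeps the raw training dumps: NB_FEATURES floats per frame go to stdout and FRAME_SIZE signed bytes of excitation go to a separate file. A hedged sketch of reading those dumps back in Python, consistent with how train_lpcnet.py loads its inputs below (the file names here are placeholders, not part of the patch):

    import numpy as np

    nb_features = 54                                   # matches nb_features in train_lpcnet.py
    features = np.fromfile('features.f32', dtype='float32').reshape(-1, nb_features)
    excitation = np.fromfile('exc.s8', dtype='int8')   # FRAME_SIZE signed bytes per frame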
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -16,7 +16,7 @@
 
 
 def new_wavernn_model():
-    pcm = Input(shape=(None, 1))
+    pcm = Input(shape=(None, 2))
     feat = Input(shape=(None, nb_used_features))
 
     rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))
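This is the model-side half of the change: the sample-rate input now carries two channels per time step, the previous PCM sample and the pitch-delayed sample built in train_lpcnet.py below. A minimal Keras sketch of the updated input block (nb_used_features is assumed here; the real value comes from lpcnet.py):

    from keras.layers import Input, Lambda
    import keras.backend as K

    nb_used_features = 38                 # assumption; defined in lpcnet.py

    # Two channels per sample after this patch: PCM and its pitch-delayed copy.
    pcm  = Input(shape=(None, 2))
    feat = Input(shape=(None, nb_used_features))

    # Frame-rate features are repeated 160x up to the sample rate, as in the
    # unchanged Lambda line above.
    rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))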
--- a/dnn/train_lpcnet.py
+++ b/dnn/train_lpcnet.py
@@ -7,6 +7,7 @@
 from keras.callbacks import ModelCheckpoint
 from ulaw import ulaw2lin, lin2ulaw
 import keras.backend as K
+import h5py
 
 import tensorflow as tf
 from keras.backend.tensorflow_backend import set_session
@@ -23,10 +24,11 @@
 
 pcmfile = sys.argv[1]
 feature_file = sys.argv[2]
+frame_size = 160
 nb_features = 54
 nb_used_features = lpcnet.nb_used_features
 feature_chunk_size = 15
-pcm_chunk_size = 160*feature_chunk_size
+pcm_chunk_size = frame_size*feature_chunk_size
 
 data = np.fromfile(pcmfile, dtype='int8')
 nb_frames = len(data)//pcm_chunk_size
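For reference, a quick sketch of the chunking arithmetic this hunk parameterizes (the corpus length below is hypothetical):

    frame_size = 160                                     # samples per feature frame (10 ms at 16 kHz)
    feature_chunk_size = 15                              # feature frames per training sequence
    pcm_chunk_size = frame_size * feature_chunk_size     # 2400 samples per sequence

    n_samples = 10000000                                 # hypothetical corpus length
    nb_frames = n_samples // pcm_chunk_size              # 4166 whole training sequences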
@@ -38,6 +40,15 @@
 
 in_data = np.concatenate([data[0:1], data[:-1]])/16.;
 
+features = np.reshape(features, (nb_frames*feature_chunk_size, nb_features))
+pitch = 1.*data
+pitch[:320] = 0
+for i in range(2, nb_frames*feature_chunk_size):
+    period = int(50*features[i,36]+100)
+    period = period - 4
+    pitch[i*frame_size:(i+1)*frame_size] = data[i*frame_size-period:(i+1)*frame_size-period]
+in_pitch = np.reshape(pitch/16., (nb_frames, pcm_chunk_size, 1))
+
 in_data = np.reshape(in_data, (nb_frames, pcm_chunk_size, 1))
 out_data = np.reshape(data, (nb_frames, pcm_chunk_size, 1))
 out_data = (out_data.astype('int16')+128).astype('uint8')
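The new block decodes the pitch lag from feature 36 (period = int(50*f + 100) samples, minus a 4-sample offset) and builds a second signal that is simply the input delayed by that lag, frame by frame. A standalone sketch of the same construction, assuming arrays shaped as in this script:

    import numpy as np

    def make_pitch_signal(data, features, frame_size=160, offset=4):
        # data: int8 PCM/excitation stream (assumed at least len(features)*frame_size long);
        # features: (n_frames, 54) float array with the pitch value in column 36.
        pitch = 1. * data                                # promote to float, like the script
        pitch[:2 * frame_size] = 0                       # first two frames: no valid lag yet
        for i in range(2, len(features)):
            period = int(50 * features[i, 36] + 100) - offset
            pitch[i * frame_size:(i + 1) * frame_size] = \
                data[i * frame_size - period:(i + 1) * frame_size - period]
        return pitch / 16.                               # same scaling as in_data/in_pitch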
@@ -44,7 +55,14 @@
 features = np.reshape(features, (nb_frames, feature_chunk_size, nb_features))
 features = features[:, :, :nb_used_features]
 
-checkpoint = ModelCheckpoint('lpcnet1c_{epoch:02d}.h5')
+
+in_data = np.concatenate([in_data, in_pitch], axis=-1)
+
+#with h5py.File('in_data.h5', 'w') as f:
+# f.create_dataset('data', data=in_data[:50000, :, :])
+# f.create_dataset('feat', data=features[:50000, :, :])
+
+checkpoint = ModelCheckpoint('lpcnet1e_{epoch:02d}.h5')
 
 #model.load_weights('wavernn1c_01.h5')
 model.compile(optimizer=Adam(0.002, amsgrad=True, decay=2e-4), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
--
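Net effect of the last hunk: the pitch channel built above is stacked onto in_data along the last axis, so each training sample becomes a (pcm_chunk_size, 2) array and matches the Input(shape=(None, 2)) change in lpcnet.py. A shape-only sketch (sizes are hypothetical):

    import numpy as np

    nb_frames, pcm_chunk_size = 4000, 2400                # hypothetical sizes
    in_data  = np.zeros((nb_frames, pcm_chunk_size, 1), dtype=np.float32)
    in_pitch = np.zeros((nb_frames, pcm_chunk_size, 1), dtype=np.float32)

    x = np.concatenate([in_data, in_pitch], axis=-1)      # -> (nb_frames, pcm_chunk_size, 2)
    assert x.shape == (nb_frames, pcm_chunk_size, 2)      # matches Input(shape=(None, 2))

The training call itself (not shown in this hunk) then feeds this two-channel array together with the feature tensor.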