ref: d93239e955d67fe6d1b0cbb3880307af11abf7f7
parent: b0c61158f78672c32317eb77bc66f1f4b27bac7f
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Fri Nov 23 15:07:42 EST 2018
Use the non-CuDNN version of the GRU for dumping the weights. Not sure what the weight layout of the CuDNN version is.
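
For context, a sketch (not part of the patch) of why the non-CuDNN layout is predictable: Keras stores a plain GRU's kernel as an (input_dim, 3*units) matrix with the update (z), reset (r), and candidate (h) gate columns concatenated in that order, so dump_lpcnet.py can slice it deterministically. Layer names below are illustrative, and the bias shape in the comment is an assumption about this Keras version:

    # Sketch (not part of the patch): inspect the weight layout of a GRU
    # configured to match CuDNNGRU semantics; 'gru_demo' is illustrative.
    from keras.layers import Input, GRU
    from keras.models import Model

    inp = Input(shape=(None, 2))
    out = GRU(16, return_sequences=True, recurrent_activation="sigmoid",
              reset_after=True, name='gru_demo')(inp)
    model = Model(inp, out)

    kernel, recurrent_kernel, bias = model.get_layer('gru_demo').get_weights()
    print(kernel.shape)            # (2, 48): z, r, h gate columns concatenated
    print(recurrent_kernel.shape)  # (16, 48)
    print(bias.shape)              # assumed (2, 48) when reset_after=True
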
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -47,8 +47,8 @@
activation = self.activation.__name__.upper()
else:
activation = 'TANH'
- if hasattr(self, 'reset_after'):
- reset_after = self.reset_after
+ if hasattr(self, 'reset_after') and not self.reset_after:
+ reset_after = 0
else:
reset_after = 1
f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}, {}\n}};\n\n'
@@ -97,7 +97,7 @@
MDense.dump_layer = dump_mdense_layer
-model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640)
+model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640, use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -2,7 +2,7 @@
import math
from keras.models import Model
-from keras.layers import Input, LSTM, CuDNNGRU, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D, Multiply, Add, Bidirectional, MaxPooling1D, Activation
+from keras.layers import Input, GRU, CuDNNGRU, Dense, Embedding, Reshape, Concatenate, Lambda, Conv1D, Multiply, Add, Bidirectional, MaxPooling1D, Activation
from keras import backend as K
from keras.initializers import Initializer
from keras.callbacks import Callback
@@ -85,7 +85,7 @@
'seed': self.seed
}
-def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38):
+def new_lpcnet_model(rnn_units1=384, rnn_units2=16, nb_used_features = 38, use_gpu=True):
pcm = Input(shape=(None, 2))
exc = Input(shape=(None, 1))
feat = Input(shape=(None, nb_used_features))
@@ -115,8 +115,13 @@
rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))
- rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True, name='gru_a')
- rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True, name='gru_b')
+ if use_gpu:
+ rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True, name='gru_a')
+ rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True, name='gru_b')
+ else:
+        rnn = GRU(rnn_units1, return_sequences=True, return_state=True, recurrent_activation="sigmoid", reset_after=True, name='gru_a')
+        rnn2 = GRU(rnn_units2, return_sequences=True, return_state=True, recurrent_activation="sigmoid", reset_after=True, name='gru_b')
+
rnn_in = Concatenate()([cpcm, cexc, rep(cfeat)])
md = MDense(pcm_levels, activation='softmax', name='dual_fc')
gru_out1, _ = rnn(rnn_in)
--
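Usage note, a sketch under the patch's assumptions (the checkpoint filename is hypothetical): with the new flag, the dump script can rebuild the model without a GPU and load GPU-trained weights, relying on Keras to convert CuDNNGRU weights to the compatible GRU layout on load.

    # Sketch: how the new use_gpu flag is meant to be used by dump_lpcnet.py.
    import lpcnet

    model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640, use_gpu=False)
    # Assumes this Keras version converts CuDNNGRU weights to the
    # reset_after GRU layout when loading; filename is hypothetical.
    model.load_weights('lpcnet_checkpoint.h5')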