ref: b0c61158f78672c32317eb77bc66f1f4b27bac7f
parent: b9cd61be8b882d315182d224bd657b144a92bd25
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Fri Nov 23 14:51:34 EST 2018
More meaningful names
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -32,7 +32,7 @@
def dump_layer_ignore(self, f, hf):
print("ignoring layer " + self.name + " of type " + self.__class__.__name__)- False
+ return False
Layer.dump_layer = dump_layer_ignore
def dump_gru_layer(self, f, hf):
@@ -55,7 +55,7 @@
.format(name, name, name, name, weights[0].shape[0], weights[0].shape[1]//3, activation, reset_after))
hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
hf.write('extern const GRULayer {};\n\n'.format(name));
- True
+ return True
CuDNNGRU.dump_layer = dump_gru_layer
GRU.dump_layer = dump_gru_layer
@@ -74,7 +74,7 @@
.format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
hf.write('extern const DenseLayer {};\n\n'.format(name));
- False
+ return False
Dense.dump_layer = dump_dense_layer
def dump_mdense_layer(self, f, hf):
@@ -93,7 +93,7 @@
.format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
hf.write('extern const MDenseLayer {};\n\n'.format(name));
- False
+ return False
MDense.dump_layer = dump_mdense_layer
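
Note: the return-value fixes above matter because each dump_layer now reports whether it actually wrote the layer; previously the bare False/True expressions were evaluated and discarded, so every patched method implicitly returned None. A minimal caller sketch, assuming the model, f, and hf objects from the surrounding dump script (this loop is illustrative, not part of the patch):

    # Hypothetical sketch: count how many layers were actually dumped,
    # using the boolean returned by the monkey-patched dump_layer methods.
    dumped = sum(1 for layer in model.layers if layer.dump_layer(f, hf))
    print("dumped {} of {} layers".format(dumped, len(model.layers)))
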
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -94,12 +94,12 @@
dec_state1 = Input(shape=(rnn_units1,))
dec_state2 = Input(shape=(rnn_units2,))
- fconv1 = Conv1D(128, 3, padding='same', activation='tanh')
- fconv2 = Conv1D(102, 3, padding='same', activation='tanh')
+ fconv1 = Conv1D(128, 3, padding='same', activation='tanh', name='feature_conv1')
+ fconv2 = Conv1D(102, 3, padding='same', activation='tanh', name='feature_conv2')
- embed = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+ embed = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_sig')
cpcm = Reshape((-1, embed_size*2))(embed(pcm))
- embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit())
+ embed2 = Embedding(256, embed_size, embeddings_initializer=PCMInit(), name='embed_exc')
cexc = Reshape((-1, embed_size))(embed2(exc))
pembed = Embedding(256, 64)
@@ -107,8 +107,8 @@
cfeat = fconv2(fconv1(cat_feat))
- fdense1 = Dense(128, activation='tanh')
- fdense2 = Dense(128, activation='tanh')
+ fdense1 = Dense(128, activation='tanh', name='feature_dense1')
+ fdense2 = Dense(128, activation='tanh', name='feature_dense2')
cfeat = Add()([cfeat, cat_feat])
cfeat = fdense2(fdense1(cfeat))
@@ -115,10 +115,10 @@
rep = Lambda(lambda x: K.repeat_elements(x, 160, 1))
- rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True)
- rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True)
+ rnn = CuDNNGRU(rnn_units1, return_sequences=True, return_state=True, name='gru_a')
+ rnn2 = CuDNNGRU(rnn_units2, return_sequences=True, return_state=True, name='gru_b')
rnn_in = Concatenate()([cpcm, cexc, rep(cfeat)])
- md = MDense(pcm_levels, activation='softmax')
+ md = MDense(pcm_levels, activation='softmax', name='dual_fc')
gru_out1, _ = rnn(rnn_in)
gru_out2, _ = rnn2(Concatenate()([gru_out1, rep(cfeat)]))
ulaw_prob = md(gru_out2)
--
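
Note: with explicit layer names in place, layers can be looked up through the standard Keras Model.get_layer(name) API instead of auto-generated names such as cu_dnngru_1, and the C symbols emitted by dump_lpcnet.py (which derive from layer.name) become readable. A hedged sketch of the lookup, where the constructor name new_lpcnet_model and its three return values are assumptions about dnn/lpcnet.py, not confirmed by this patch:

    # Sketch (not part of the patch): look up layers by their new explicit names.
    from lpcnet import new_lpcnet_model  # assumed constructor name in dnn/lpcnet.py

    model, _, _ = new_lpcnet_model()              # assumed return signature
    gru_a = model.get_layer('gru_a')              # main sample-rate GRU
    embed_sig = model.get_layer('embed_sig')      # signal embedding table
    print(gru_a.units, embed_sig.get_weights()[0].shape)
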