ref: d4046036a90c8b3931d2eac91b71ec14b7ba8053
parent: 477d08734d8d716b8875df490787c6b8b59656f6
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Sat Nov 24 06:32:01 EST 2018
Dump Conv1D (didn't check weight ordering at all)
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -64,10 +64,7 @@
weights = self.get_weights()
printVector(f, weights[0], name + '_weights')
printVector(f, weights[-1], name + '_bias')
- if hasattr(self, 'activation'):
- activation = self.activation.__name__.upper()
- else:
- activation = 'TANH'
+ activation = self.activation.__name__.upper()
f.write('const DenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, ACTIVATION_{}\n}};\n\n'.format(name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
@@ -82,10 +79,7 @@
printVector(f, weights[0], name + '_weights')
printVector(f, weights[1], name + '_bias')
printVector(f, weights[1], name + '_factor')
- if hasattr(self, 'activation'):
- activation = self.activation.__name__.upper()
- else:
- activation = 'TANH'
+ activation = self.activation.__name__.upper()
f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, ACTIVATION_{}\n}};\n\n'.format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], activation))
hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
@@ -92,6 +86,21 @@
hf.write('extern const MDenseLayer {};\n\n'.format(name));
return False
MDense.dump_layer = dump_mdense_layer
+
+def dump_conv1d_layer(self, f, hf):
+ name = self.name
+    print("printing layer " + name + " of type " + self.__class__.__name__)
+    weights = self.get_weights()
+ printVector(f, weights[0], name + '_weights')
+ printVector(f, weights[-1], name + '_bias')
+ activation = self.activation.__name__.upper()
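+    # NOTE: assumes Keras stores Conv1D kernels as (kernel_size, in_channels, filters),
+    # so shape[1] is the input size, shape[0] the kernel size and shape[2] the number
+    # of outputs; this ordering has not been verified against the C side.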
+    f.write('const Conv1DLayer {} = {{\n {}_bias,\n {}_weights,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'
+            .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
+    hf.write('#define {}_SIZE {}\n'.format(name.upper(), weights[0].shape[1]))
+    hf.write('extern const Conv1DLayer {};\n\n'.format(name));
+    return False
+Conv1D.dump_layer = dump_conv1d_layer
+
def dump_embedding_layer(self, f, hf):
name = self.name
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -128,6 +128,10 @@
compute_activation(output, output, N, layer->activation);
}
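+
+/* NOTE: empty stub; the MDense forward pass is left unimplemented for now. */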
+void compute_mdense(const MDenseLayer *layer, float *output, const float *input)
+{
+}
+
void compute_gru(const GRULayer *gru, float *state, const float *input)
{
   int i;
@@ -146,8 +150,7 @@
z[i] = gru->bias[i];
gemm_accum(z, gru->input_weights, N, M, stride, input);
gemm_accum(z, gru->recurrent_weights, N, N, stride, state);
- for (i=0;i<N;i++)
- z[i] = sigmoid_approx(z[i]);
+ compute_activation(z, z, N, ACTIVATION_SIGMOID);
/* Compute reset gate. */
for (i=0;i<N;i++)
@@ -154,8 +157,7 @@
r[i] = gru->bias[N + i];
gemm_accum(r, &gru->input_weights[N], N, M, stride, input);
gemm_accum(r, &gru->recurrent_weights[N], N, N, stride, state);
- for (i=0;i<N;i++)
- r[i] = sigmoid_approx(r[i]);
+ compute_activation(r, r, N, ACTIVATION_SIGMOID);
/* Compute output. */
for (i=0;i<N;i++)
@@ -174,8 +176,9 @@
gemm_accum(h, &gru->input_weights[2*N], N, M, stride, input);
gemm_accum(h, &gru->recurrent_weights[2*N], N, N, stride, tmp);
}
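+   /* Apply the output activation in place, then interpolate with the previous state using z. */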
+ compute_activation(h, h, N, gru->activation);
for (i=0;i<N;i++)
- h[i] = z[i]*state[i] + (1-z[i])*tansig_approx(h[i]);
+ h[i] = z[i]*state[i] + (1-z[i])*h[i];
for (i=0;i<N;i++)
state[i] = h[i];
}
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -84,6 +84,8 @@
void compute_dense(const DenseLayer *layer, float *output, const float *input);
+void compute_mdense(const MDenseLayer *layer, float *output, const float *input);
+
void compute_gru(const GRULayer *gru, float *state, const float *input);
void compute_conv1d(const Conv1DLayer *layer, float *output, float *mem, const float *input);
--