ref: 37fbcaee0b550c8141cd5d8a7ff40bfc3b2e9c55
parent: 94ac0841df4dc5aa9f72706d637dae62d330ff37
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Sat Nov 24 10:51:08 EST 2018
mdense max size
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -14,6 +14,7 @@
max_rnn_neurons = 1
max_conv_inputs = 1
+max_mdense_tmp = 1
def printVector(f, vector, name):
v = np.reshape(vector, (-1));
@@ -80,6 +81,7 @@
Dense.dump_layer = dump_dense_layer
def dump_mdense_layer(self, f, hf):
+ global max_mdense_tmp
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()
@@ -87,6 +89,7 @@
printVector(f, weights[1], name + '_bias')
printVector(f, weights[1], name + '_factor')
activation = self.activation.__name__.upper()
+ max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])
f.write('const MDenseLayer {} = {{\n {}_bias,\n {}_weights,\n {}_factor,\n {}, {}, {}, ACTIVATION_{}\n}};\n\n'.format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
@@ -148,6 +151,8 @@
hf.write('#define MAX_RNN_NEURONS {}\n\n'.format(max_rnn_neurons))
hf.write('#define MAX_CONV_INPUTS {}\n\n'.format(max_conv_inputs))
+hf.write('#define MAX_MDENSE_TMP {}\n\n'.format(max_mdense_tmp))
+
hf.write('struct RNNState {\n')
for i, name in enumerate(layer_list):
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -132,11 +132,10 @@
int i, c;
int N, M, C;
int stride;
+ float tmp[MAX_MDENSE_TMP];
M = layer->nb_inputs;
N = layer->nb_neurons;
C = layer->nb_channels;
- /* FIXME: Make this C90. */
- float tmp[N*C];
stride = N*C;
for (i=0;i<N*C;i++)
tmp[i] = layer->bias[i];
--