ref: c025744e34df941db169d5061e3c07a767fe32d8
parent: 66486004ba1b2c44d53cd58b8346f9d78480b19f
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Sat Nov 24 10:30:17 EST 2018
Fix conv1d, default to size 384
--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -115,7 +115,7 @@
Embedding.dump_layer = dump_embedding_layer
-model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=640, use_gpu=False)
+model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
#model.summary()
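
The change above shrinks the main GRU (rnn_units1) from 640 to 384 units, so the weights dumped to C match the new default model size. A minimal sketch to confirm the size of the model being dumped; the layer name 'gru_a' is an assumption about lpcnet.py's naming, not something taken from this patch:

    import lpcnet

    model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)
    print(model.get_layer('gru_a').units)  # expected: 384
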
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -210,11 +210,9 @@
int stride;
float tmp[MAX_CONV_INPUTS];
celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
- M = layer->nb_inputs;
- N = layer->nb_neurons;
RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
- RNN_COPY(tmp, input, layer->nb_inputs);
+ RNN_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
- M = layer->nb_inputs;
+ M = layer->nb_inputs*layer->kernel_size;
N = layer->nb_neurons;
stride = N;
for (i=0;i<N;i++)
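
For reference, the corrected compute_conv1d() keeps kernel_size-1 past frames in mem, appends the current frame, and multiplies the whole window by the input weights, which is why M must be nb_inputs*kernel_size rather than nb_inputs. A minimal NumPy sketch of that streaming step, illustrative only and not code from this repository (activation omitted):

    import numpy as np

    def conv1d_step(weights, bias, mem, frame):
        # weights: (nb_inputs*kernel_size, nb_neurons), bias: (nb_neurons,)
        # mem: (nb_inputs*(kernel_size-1),) past frames, frame: (nb_inputs,)
        tmp = np.concatenate([mem, frame])  # history first, current frame last
        out = bias + tmp @ weights          # gemm over M = nb_inputs*kernel_size
        new_mem = tmp[frame.shape[0]:]      # slide the window by one frame
        return out, new_mem
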
--