shithub: opus

ref: 538f25565a03989540f49e7a9a97a07cb29ee55a
parent: 8d62ba067e9a1cbc806f26a4616365a71a457a83
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Mon Nov 26 11:02:49 EST 2018

Starting to actually test this -- fix a few OOB reads

--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -116,7 +116,7 @@
     activation = self.activation.__name__.upper()
     max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])
     f.write('const MDenseLayer {} = {{\n   {}_bias,\n   {}_weights,\n   {}_factor,\n   {}, {}, {}, ACTIVATION_{}\n}};\n\n'
-            .format(name, name, name, name, weights[0].shape[0], weights[0].shape[1], weights[0].shape[2], activation))
+            .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
     return False
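The swap above matters because the struct being initialized lists its sizes in a fixed order. Assuming dnn/nnet.h declares MDenseLayer roughly as below (field names recalled from the surrounding code and possibly slightly off), the two integers after the factor pointer are the input and output sizes, so emitting weights[0].shape[0] where nb_inputs belongs handed the C code swapped dimensions, presumably one of the OOB reads the commit message refers to:

    typedef struct {
      const float *bias;
      const float *input_weights;
      const float *factor;
      int nb_inputs;    /* must receive weights[0].shape[1] */
      int nb_neurons;   /* must receive weights[0].shape[0], also used for OUT_SIZE */
      int nb_channels;  /* weights[0].shape[2] */
      int activation;
    } MDenseLayer;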
--- a/dnn/lpcnet.c
+++ b/dnn/lpcnet.c
@@ -25,6 +25,7 @@
 */
 
 #include <math.h>
+#include <stdio.h>
 #include "nnet_data.h"
 #include "nnet.h"
 #include "common.h"
@@ -150,3 +151,21 @@
         lpcnet->last_exc = exc;
     }
 }
+
+#if 1
+#define FRAME_SIZE 160
+int main(int argc, char **argv) {
+    LPCNetState *net;
+    net = lpcnet_create();
+    while (1) {
+        float features[NB_FEATURES];
+        short pcm[FRAME_SIZE];
+        fread(features, sizeof(features[0]), NB_FEATURES, stdin);
+        if (feof(stdin)) break;
+        lpcnet_synthesize(net, pcm, features, FRAME_SIZE);
+        fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, stdout);
+    }
+    lpcnet_destroy(net);
+    return 0;
+}
+#endif
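One caveat with the test driver as written: fread()'s return value is ignored, so a short read at end of input (anything less than a full feature frame) would still be synthesized from whatever happens to be on the stack. A slightly more defensive version of the same loop, offered only as a sketch, would check the element count:

    while (1) {
        float features[NB_FEATURES];
        short pcm[FRAME_SIZE];
        /* fread() returns the number of complete elements read; stop on
           EOF or on a truncated feature frame instead of synthesizing it. */
        if (fread(features, sizeof(features[0]), NB_FEATURES, stdin) != NB_FEATURES)
            break;
        lpcnet_synthesize(net, pcm, features, FRAME_SIZE);
        fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, stdout);
    }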
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -217,13 +217,13 @@
    celt_assert(input != output);
    celt_assert(layer->nb_inputs*layer->kernel_size <= MAX_CONV_INPUTS);
    RNN_COPY(tmp, mem, layer->nb_inputs*(layer->kernel_size-1));
-   RNN_COPY(tmp, input, layer->nb_inputs);
+   RNN_COPY(&tmp[layer->nb_inputs*(layer->kernel_size-1)], input, layer->nb_inputs);
    M = layer->nb_inputs*layer->kernel_size;
    N = layer->nb_neurons;
    stride = N;
    for (i=0;i<N;i++)
       output[i] = layer->bias[i];
-   gemm_accum(output, layer->input_weights, N, M, stride, input);
+   gemm_accum(output, layer->input_weights, N, M, stride, tmp);
    compute_activation(output, output, N, layer->activation);
    RNN_COPY(mem, &tmp[layer->nb_inputs], layer->nb_inputs*(layer->kernel_size-1));
 }
--
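The nnet.c hunk fixes the buffer handling in the conv1d path: tmp is meant to hold the (kernel_size-1) previous input frames followed by the current one, and the matrix multiply has to consume that assembled window rather than the bare input pointer. A standalone sketch of the intended layout (memcpy standing in for RNN_COPY, names taken from the diff):

    #include <string.h>

    /* Sliding-window assembly as the fixed compute_conv1d() performs it.
       tmp must have room for nb_inputs*kernel_size floats. */
    static void conv1d_window(float *tmp, float *mem, const float *input,
                              int nb_inputs, int kernel_size)
    {
        int hist = nb_inputs*(kernel_size-1);
        memcpy(tmp, mem, hist*sizeof(float));                /* past frames first */
        memcpy(&tmp[hist], input, nb_inputs*sizeof(float));  /* append current frame */
        /* gemm_accum() then reads all nb_inputs*kernel_size entries of tmp;
           reading from `input` instead (the old code) overruns it by hist floats. */
        memcpy(mem, &tmp[nb_inputs], hist*sizeof(float));    /* slide history forward */
    }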