ref: f3bc6bacd25a4cd5b69e680348727997b3f177d1
parent: 3e2198c6e10d77df440371925577f04d55cb26c5
author: Jean-Marc Valin <jmvalin@amazon.com>
date: Wed Feb 2 19:26:44 EST 2022
Avoid tmp buffer overflows
--- a/dnn/lpcnet.c
+++ b/dnn/lpcnet.c
@@ -98,7 +98,6 @@
compute_conv1d(&feature_conv1, conv1_out, net->feature_conv1_state, in);
if (lpcnet->frame_count < FEATURE_CONV1_DELAY) RNN_CLEAR(conv1_out, FEATURE_CONV1_OUT_SIZE);
compute_conv1d(&feature_conv2, conv2_out, net->feature_conv2_state, conv1_out);
- celt_assert(FRAME_INPUT_SIZE == FEATURE_CONV2_OUT_SIZE);
if (lpcnet->frame_count < FEATURES_DELAY) RNN_CLEAR(conv2_out, FEATURE_CONV2_OUT_SIZE);
_lpcnet_compute_dense(&feature_dense1, dense1_out, conv2_out);
_lpcnet_compute_dense(&feature_dense2, condition, dense1_out);
--- a/dnn/lpcnet_plc.c
+++ b/dnn/lpcnet_plc.c
@@ -62,7 +62,7 @@
}
static void compute_plc_pred(PLCNetState *net, float *out, const float *in) {
- float zeros[1024] = {0};
+ float zeros[3*PLC_MAX_RNN_NEURONS] = {0};
float dense_out[PLC_DENSE1_OUT_SIZE];
_lpcnet_compute_dense(&plc_dense1, dense_out, in);
compute_gruB(&plc_gru1, zeros, net->plc_gru1_state, dense_out);
--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -38,6 +38,7 @@
#include "tansig_table.h"
#include "nnet.h"
#include "nnet_data.h"
+#include "plc_data.h"
#ifdef NO_OPTIMIZATIONS
#warning Compiling without any vectorization. This code will be very slow
@@ -315,13 +316,15 @@
state[i] = h[i];
}
+#define MAX_RNN_NEURONS_ALL IMAX(MAX_RNN_NEURONS, PLC_MAX_RNN_NEURONS)
+
void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input)
{
int i;
int N, M;
int stride;
- float zrh[3*MAX_RNN_NEURONS];
- float recur[3*MAX_RNN_NEURONS];
+ float zrh[3*MAX_RNN_NEURONS_ALL];
+ float recur[3*MAX_RNN_NEURONS_ALL];
float *z;
float *r;
float *h;
@@ -330,7 +333,7 @@
z = zrh;
r = &zrh[N];
h = &zrh[2*N];
- celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS);
+ celt_assert(gru->nb_neurons <= MAX_RNN_NEURONS_ALL);
celt_assert(input != state);
celt_assert(gru->reset_after);
stride = 3*N;
--