ref: c7b6935bf2bb6287ceca3b3977618c03d1c007d1
parent: 0098fe70ac5a94952956146ed4795341a3639c79
author: Jean-Marc Valin <jmvalin@amazon.com>
date: Sat May 20 10:21:58 EDT 2023
Add validation for weights blob
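
The model weights are downloaded separately (autogen.sh fetches the blob
matching a pinned commit), so the blob should not be trusted blindly.
Layer init functions now look up arrays with find_array_check(), which
verifies that each named array exists and has exactly the size implied by
the layer dimensions. Sparse index blobs go through find_idx_check(),
which walks the index, rejects unaligned or out-of-range block positions,
and returns the total block count so the size of the matching weight
array can be checked as well. lpcnet_init() now propagates the return
value of init_lpcnet_model() instead of ignoring it.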
--- a/dnn/autogen.sh
+++ b/dnn/autogen.sh
@@ -6,7 +6,7 @@
test -n "$srcdir" && cd "$srcdir"
#SHA1 of the first commit compatible with the current model
-commit=301a9fb
+commit=f1071fa
./download_model.sh $commit
echo "Updating build configuration files for lpcnet, please wait...."
--- a/dnn/lpcnet.c
+++ b/dnn/lpcnet.c
@@ -174,6 +174,7 @@
LPCNET_EXPORT int lpcnet_init(LPCNetState *lpcnet)
{
int i;
+ int ret;
const char* rng_string="LPCNet";
memset(lpcnet, 0, lpcnet_get_size());
lpcnet->last_exc = lin2ulaw(0.f);
@@ -182,8 +183,10 @@
lpcnet->sampling_logit_table[i] = -log((1-prob)/prob);
}
kiss99_srand(&lpcnet->rng, (const unsigned char *)rng_string, strlen(rng_string));
- init_lpcnet_model(&lpcnet->model, lpcnet_arrays);
- return 0;
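+   /* Validate the weight blob; assert in debug builds, return the error otherwise. */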
+ ret = init_lpcnet_model(&lpcnet->model, lpcnet_arrays);
+ celt_assert(ret == 0);
+ return ret;
}
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -30,6 +30,9 @@
#include "nnet.h"
+/* Number of weights in one sparse block (4 inputs x 8 outputs). */
+#define SPARSE_BLOCK_SIZE 32
+
extern const WeightArray lpcnet_arrays[];
int parse_record(const unsigned char **data, int *len, WeightArray *array) {
@@ -71,11 +73,48 @@
return nb_arrays;
}
-static const void *find_array(const WeightArray *arrays, const char *name) {
+static const WeightArray *find_array_entry(const WeightArray *arrays, const char *name) {
while (arrays->name && strcmp(arrays->name, name) != 0) arrays++;
- return arrays->data;
+ return arrays;
}
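+/* Finds the named array and checks that its size matches what the caller expects. */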
+static const void *find_array_check(const WeightArray *arrays, const char *name, int size) {
+ const WeightArray *a = find_array_entry(arrays, name);
+  if (a->name && a->size == size) return a->data;
+ else return NULL;
+}
+
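+/* Validates a sparse weight index blob. The blob is a sequence of rows,
+   one per group of 8 outputs: a block count followed by that many input
+   positions. Each position must be 4-aligned and within nb_in, so every
+   block covers 4 inputs x 8 outputs = SPARSE_BLOCK_SIZE weights. Returns
+   the index data and sets *total_blocks on success, NULL on failure. */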
+static const void *find_idx_check(const WeightArray *arrays, const char *name, int nb_in, int nb_out, int *total_blocks) {
+ int remain;
+ const int *idx;
+ const WeightArray *a = find_array_entry(arrays, name);
+ *total_blocks = 0;
+  if (a->name == NULL) return NULL;
+ idx = a->data;
+ remain = a->size/sizeof(int);
+ while (remain > 0) {
+ int nb_blocks;
+ int i;
+ nb_blocks = *idx++;
+    if (nb_blocks < 0 || remain < nb_blocks+1) return NULL;
+ for (i=0;i<nb_blocks;i++) {
+ int pos = *idx++;
+ if (pos+3 >= nb_in || (pos&0x3)) return NULL;
+ }
+ nb_out -= 8;
+ remain -= nb_blocks+1;
+ *total_blocks += nb_blocks;
+ }
+ if (nb_out != 0) return NULL;
+ return a->data;
+}
+
int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
const char *bias,
const char *input_weights,
@@ -85,9 +118,9 @@
int nb_channels,
int activation)
{
- if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
- if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
- if ((layer->factor = find_array(arrays, factor)) == NULL) return 1;
+ if ((layer->bias = find_array_check(arrays, bias, nb_neurons*nb_channels*sizeof(layer->bias[0]))) == NULL) return 1;
+ if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_channels*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
+ if ((layer->factor = find_array_check(arrays, factor, nb_channels*nb_neurons*sizeof(layer->factor[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->nb_channels = nb_channels;
@@ -102,8 +135,8 @@
int nb_neurons,
int activation)
{
- if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
- if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+ if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+ if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
@@ -121,11 +154,12 @@
int activation,
int reset_after)
{
- if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
- if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
- if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
- if ((layer->input_weights_idx = find_array(arrays, input_weights_idx)) == NULL) return 1;
- if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
+ int total_blocks;
+ if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+ if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
+ if ((layer->input_weights_idx = find_idx_check(arrays, input_weights_idx, nb_inputs, 3*nb_neurons, &total_blocks)) == NULL) return 1;
+ if ((layer->input_weights = find_array_check(arrays, input_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->input_weights[0]))) == NULL) return 1;
+ if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, 3*nb_neurons*nb_neurons*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
@@ -143,11 +177,12 @@
int activation,
int reset_after)
{
- if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
- if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
- if ((layer->diag_weights = find_array(arrays, diag_weights)) == NULL) return 1;
- if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
- if ((layer->idx = find_array(arrays, idx)) == NULL) return 1;
+ int total_blocks;
+ if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+ if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
+ if ((layer->diag_weights = find_array_check(arrays, diag_weights, 3*nb_neurons*sizeof(layer->diag_weights[0]))) == NULL) return 1;
+ if ((layer->idx = find_idx_check(arrays, idx, nb_neurons, 3*nb_neurons, &total_blocks)) == NULL) return 1;
+ if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
layer->nb_neurons = nb_neurons;
layer->activation = activation;
layer->reset_after = reset_after;
@@ -162,8 +197,8 @@
int nb_neurons,
int activation)
{
- if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
- if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+ if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
+ if ((layer->input_weights = find_array_check(arrays, input_weights, kernel_size*nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->kernel_size = kernel_size;
layer->nb_neurons = nb_neurons;
@@ -176,7 +211,7 @@
int nb_inputs,
int dim)
{
- if ((layer->embedding_weights = find_array(arrays, embedding_weights)) == NULL) return 1;
+ if ((layer->embedding_weights = find_array_check(arrays, embedding_weights, nb_inputs*dim*sizeof(layer->embedding_weights[0]))) == NULL) return 1;
layer->nb_inputs = nb_inputs;
layer->dim = dim;
return 0;
--- a/dnn/training_tf2/dump_lpcnet.py
+++ b/dnn/training_tf2/dump_lpcnet.py
@@ -340,13 +340,12 @@
W = model.get_layer('gru_a').get_weights()[0][3*embed_size:,:]
-#FIXME: dump only half the biases
b = model.get_layer('gru_a').get_weights()[2]
- dump_dense_layer_impl('gru_a_dense_feature', W, b, 'LINEAR', f, hf)
+ dump_dense_layer_impl('gru_a_dense_feature', W, b[:len(b)//2], 'LINEAR', f, hf)
W = model.get_layer('gru_b').get_weights()[0][model.rnn_units1:,:]
b = model.get_layer('gru_b').get_weights()[2]
# Set biases to zero because they'll be included in the GRU input part
# (we need regular and SU biases)
- dump_dense_layer_impl('gru_b_dense_feature', W, 0*b, 'LINEAR', f, hf)
+ dump_dense_layer_impl('gru_b_dense_feature', W, 0*b[:len(b)//2], 'LINEAR', f, hf)
dump_grub(model.get_layer('gru_b'), f, hf, model.rnn_units1)
layer_list = []
--