shithub: opus

ref: 71c8a23fc1a13fe2dcb552fee2b65c9a2baa3e3d
parent: cc714cc5b004ca6304053322bead45a0d306876e
author: Jean-Marc Valin <jmvalin@amazon.com>
date: Tue May 16 19:15:49 EDT 2023

Code for building a model struct

Add *_init() setup functions for each layer type that look up weight arrays
by name in a NULL-terminated WeightArray list, and make dump_lpcnet.py emit
an LPCNetModel struct plus an init_lpcnet_model() function that wires the
model to those arrays. Disable the mmap-based loading code in
parse_lpcnet_weights.c with #if 0, and replace strcpy() with memcpy() for the
four-byte "DNNw" magic in write_lpcnet_weights.c.

--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -145,4 +145,57 @@
 
 int sample_from_pdf(const float *pdf, int N, float exp_boost, float pdf_floor);
 
+
+
+int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  const char *factor,
+  int nb_inputs,
+  int nb_neurons,
+  int nb_channels,
+  int activation);
+
+int dense_init(DenseLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  int nb_inputs,
+  int nb_neurons,
+  int activation);
+
+int gru_init(GRULayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *subias,
+  const char *input_weights,
+  const char *input_weights_idx,
+  const char *recurrent_weights,
+  int nb_inputs,
+  int nb_neurons,
+  int activation,
+  int reset_after);
+
+int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *subias,
+  const char *diag_weights,
+  const char *recurrent_weights,
+  const char *idx,
+  int nb_neurons,
+  int activation,
+  int reset_after);
+
+int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  int nb_inputs,
+  int kernel_size,
+  int nb_neurons,
+  int activation);
+
+int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
+  const char *embedding_weights,
+  int nb_inputs,
+  int dim);
+
+
 #endif /* _MLP_H_ */
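
The *_init() prototypes added above take each weight blob by name rather than
by pointer. A minimal sketch of a caller, assuming a NULL-terminated
WeightArray list called lpcnet_arrays and an ACTIVATION_TANH constant (both
names are illustrative, not part of this patch):

  /* hypothetical: fill one DenseLayer from a named-weights table */
  DenseLayer fc;
  extern const WeightArray lpcnet_arrays[];
  if (dense_init(&fc, lpcnet_arrays, "fc_bias", "fc_weights",
                 128 /* nb_inputs */, 16 /* nb_neurons */, ACTIVATION_TANH)) {
    return 1;  /* nonzero return means a named array was not found */
  }
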
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -71,6 +71,120 @@
   return nb_arrays;
 }
 
+static const void *find_array(const WeightArray *arrays, const char *name) {
+  while (arrays->name && strcmp(arrays->name, name) != 0) arrays++;
+  return arrays->data;
+}
+
+int mdense_init(MDenseLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  const char *factor,
+  int nb_inputs,
+  int nb_neurons,
+  int nb_channels,
+  int activation)
+{
+  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
+  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  if ((layer->factor = find_array(arrays, factor)) == NULL) return 1;
+  layer->nb_inputs = nb_inputs;
+  layer->nb_neurons = nb_neurons;
+  layer->nb_channels = nb_channels;
+  layer->activation = activation;
+  return 0;
+}
+
+int dense_init(DenseLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  int nb_inputs,
+  int nb_neurons,
+  int activation)
+{
+  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
+  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  layer->nb_inputs = nb_inputs;
+  layer->nb_neurons = nb_neurons;
+  layer->activation = activation;
+  return 0;
+}
+
+int gru_init(GRULayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *subias,
+  const char *input_weights,
+  const char *input_weights_idx,
+  const char *recurrent_weights,
+  int nb_inputs,
+  int nb_neurons,
+  int activation,
+  int reset_after)
+{
+  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
+  if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
+  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  if ((layer->input_weights_idx = find_array(arrays, input_weights_idx)) == NULL) return 1;
+  if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
+  layer->nb_inputs = nb_inputs;
+  layer->nb_neurons = nb_neurons;
+  layer->activation = activation;
+  layer->reset_after = reset_after;
+  return 0;
+}
+
+int sparse_gru_init(SparseGRULayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *subias,
+  const char *diag_weights,
+  const char *recurrent_weights,
+  const char *idx,
+  int nb_neurons,
+  int activation,
+  int reset_after)
+{
+  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
+  if ((layer->subias = find_array(arrays, subias)) == NULL) return 1;
+  if ((layer->diag_weights = find_array(arrays, diag_weights)) == NULL) return 1;
+  if ((layer->recurrent_weights = find_array(arrays, recurrent_weights)) == NULL) return 1;
+  if ((layer->idx = find_array(arrays, idx)) == NULL) return 1;
+  layer->nb_neurons = nb_neurons;
+  layer->activation = activation;
+  layer->reset_after = reset_after;
+  return 0;
+}
+
+int conv1d_init(Conv1DLayer *layer, const WeightArray *arrays,
+  const char *bias,
+  const char *input_weights,
+  int nb_inputs,
+  int kernel_size,
+  int nb_neurons,
+  int activation)
+{
+  if ((layer->bias = find_array(arrays, bias)) == NULL) return 1;
+  if ((layer->input_weights = find_array(arrays, input_weights)) == NULL) return 1;
+  layer->nb_inputs = nb_inputs;
+  layer->kernel_size = kernel_size;
+  layer->nb_neurons = nb_neurons;
+  layer->activation = activation;
+  return 0;
+}
+
+int embedding_init(EmbeddingLayer *layer, const WeightArray *arrays,
+  const char *embedding_weights,
+  int nb_inputs,
+  int dim)
+{
+  if ((layer->embedding_weights = find_array(arrays, embedding_weights)) == NULL) return 1;
+  layer->nb_inputs = nb_inputs;
+  layer->dim = dim;
+  return 0;
+}
+
+
+
+#if 0
 #include <fcntl.h>
 #include <sys/mman.h>
 #include <unistd.h>
@@ -102,3 +216,4 @@
   close(fd);
   return 0;
 }
+#endif
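
find_array() walks the list linearly and stops at the sentinel entry whose
name is NULL; because that entry's data pointer is also NULL, a missing name
falls through to the == NULL checks in every init function above. A sketch of
a table following that convention (the type and size fields are assumptions
about the WeightArray layout, kept in the order the dump script emits):

  /* hypothetical hand-built table, terminated the way find_array() expects */
  static const float fc_bias[16];
  static const float fc_weights[128*16];
  static const WeightArray demo_arrays[] = {
    {"fc_bias",    0, sizeof(fc_bias),    fc_bias},
    {"fc_weights", 0, sizeof(fc_weights), fc_weights},
    {NULL, 0, 0, NULL}  /* sentinel: name == NULL, data == NULL */
  };
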
--- a/dnn/training_tf2/dump_lpcnet.py
+++ b/dnn/training_tf2/dump_lpcnet.py
@@ -26,6 +26,7 @@
 '''
 
 import os
+import io
 import lpcnet
 import sys
 import numpy as np
@@ -39,7 +40,6 @@
 import re
 import argparse
 
-array_list = []
 
 # no cuda devices needed
 os.environ['CUDA_VISIBLE_DEVICES'] = ""
@@ -148,6 +148,9 @@
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
     hf.write('#define {}_STATE_SIZE {}\n'.format(name.upper(), weights[0].shape[1]//3))
     hf.write('extern const SparseGRULayer {};\n\n'.format(name));
+    model_struct.write('  SparseGRULayer {};\n'.format(name));
+    model_init.write('  if (sparse_gru_init(&model->{}, arrays, "{}_bias", "{}_subias", "{}_recurrent_weights_diag", "{}_recurrent_weights", "{}_recurrent_weights_idx",  {}, ACTIVATION_{}, {})) return 1;\n'
+            .format(name, name, name, name, name, name, weights[0].shape[1]//3, activation, reset_after))
     return True
 
 def dump_grub(self, f, hf, gru_a_size):
@@ -182,6 +185,9 @@
     f.write('const GRULayer {} = {{\n   {}_bias,\n   {}_subias,\n   {}_weights,\n   {}_weights_idx,\n   {}_recurrent_weights,\n   {}, {}, ACTIVATION_{}, {}\n}};\n\n'
             .format(name, name, name, name, name, name, gru_a_size, weights[0].shape[1]//3, activation, reset_after))
     hf.write('extern const GRULayer {};\n\n'.format(name));
+    model_struct.write('  GRULayer {};\n'.format(name));
+    model_init.write('  if (gru_init(&model->{}, arrays, "{}_bias", "{}_subias", "{}_weights", "{}_weights_idx", "{}_recurrent_weights", {}, {}, ACTIVATION_{}, {})) return 1;\n'
+            .format(name, name, name, name, name, name, gru_a_size, weights[0].shape[1]//3, activation, reset_after))
     return True
 
 def dump_gru_layer_dummy(self, f, hf):
@@ -200,6 +206,9 @@
             .format(name, name, name, weights.shape[0], weights.shape[1], activation))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
     hf.write('extern const DenseLayer {};\n\n'.format(name));
+    model_struct.write('  DenseLayer {};\n'.format(name));
+    model_init.write('  if (dense_init(&model->{}, arrays, "{}_bias", "{}_weights", {}, {}, ACTIVATION_{})) return 1;\n'
+            .format(name, name, name, weights.shape[0], weights.shape[1], activation))
 
 def dump_dense_layer(self, f, hf):
     name = self.name
@@ -225,6 +234,9 @@
             .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights[0].shape[0]))
     hf.write('extern const MDenseLayer {};\n\n'.format(name));
+    model_struct.write('  MDenseLayer {};\n'.format(name));
+    model_init.write('  if (mdense_init(&model->{}, arrays, "{}_bias",  "{}_weights",  "{}_factor",  {}, {}, {}, ACTIVATION_{})) return 1;\n'
+            .format(name, name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     return False
 MDense.dump_layer = dump_mdense_layer
 
@@ -243,6 +255,9 @@
     hf.write('#define {}_STATE_SIZE ({}*{})\n'.format(name.upper(), weights[0].shape[1], (weights[0].shape[0]-1)))
     hf.write('#define {}_DELAY {}\n'.format(name.upper(), (weights[0].shape[0]-1)//2))
     hf.write('extern const Conv1DLayer {};\n\n'.format(name));
+    model_struct.write('  Conv1DLayer {};\n'.format(name));
+    model_init.write('  if (conv1d_init(&model->{}, arrays, "{}_bias", "{}_weights", {}, {}, {}, ACTIVATION_{})) return 1;\n'
+            .format(name, name, name, weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], activation))
     return True
 Conv1D.dump_layer = dump_conv1d_layer
 
@@ -253,6 +268,9 @@
             .format(name, name, weights.shape[0], weights.shape[1]))
     hf.write('#define {}_OUT_SIZE {}\n'.format(name.upper(), weights.shape[1]))
     hf.write('extern const EmbeddingLayer {};\n\n'.format(name));
+    model_struct.write('  EmbeddingLayer {};\n'.format(name));
+    model_init.write('  if (embedding_init(&model->{}, arrays, "{}_weights", {}, {})) return 1;\n'
+            .format(name, name, weights.shape[0], weights.shape[1]))
 
 def dump_embedding_layer(self, f, hf):
     name = self.name
@@ -291,6 +309,11 @@
 
     f = open(cfile, 'w')
     hf = open(hfile, 'w')
+    model_struct = io.StringIO()
+    model_init = io.StringIO()
+    model_struct.write('typedef struct {\n')
+    model_init.write('int init_lpcnet_model(LPCNetModel *model, const WeightArray *arrays) {\n')
+    array_list = []
 
     f.write('/*This file is automatically generated from a Keras model*/\n')
     f.write('/*based on model {}*/\n\n'.format(sys.argv[1]))
@@ -359,8 +382,11 @@
         f.write('  {{"{}", WEIGHTS_{}_TYPE, sizeof({}), {}}},\n'.format(name, name, name, name))
         f.write('#endif\n')
     f.write('  {NULL, 0, 0}\n};\n')
-    f.write('#endif\n')
+    f.write('#endif\n\n')
 
+    model_init.write('  return 0;\n}\n')
+    f.write(model_init.getvalue())
+
     hf.write('#define MAX_RNN_NEURONS {}\n\n'.format(max_rnn_neurons))
     hf.write('#define MAX_CONV_INPUTS {}\n\n'.format(max_conv_inputs))
     hf.write('#define MAX_MDENSE_TMP {}\n\n'.format(max_mdense_tmp))
@@ -369,8 +395,10 @@
     hf.write('typedef struct {\n')
     for i, name in enumerate(layer_list):
         hf.write('  float {}_state[{}_STATE_SIZE];\n'.format(name, name.upper())) 
-    hf.write('} NNetState;\n')
+    hf.write('} NNetState;\n\n')
 
+    model_struct.write('} LPCNetModel;\n\n')
+    hf.write(model_struct.getvalue())
     hf.write('\n\n#endif\n')
 
     f.close()
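
With these changes the script accumulates per-layer lines in the model_struct
and model_init io.StringIO buffers while the layers are dumped, then flushes
model_init into the C file and model_struct (closed with '} LPCNetModel;')
into the header. The generated output ends up shaped roughly like this
(layer names and sizes below are invented for illustration):

  /* in the generated header */
  typedef struct {
    Conv1DLayer fconv1;
    GRULayer rnn_a;
    DenseLayer fc_out;
    /* ...one member per dumped layer... */
  } LPCNetModel;

  /* in the generated C file */
  int init_lpcnet_model(LPCNetModel *model, const WeightArray *arrays) {
    if (conv1d_init(&model->fconv1, arrays, "fconv1_bias", "fconv1_weights",
        128, 3, 128, ACTIVATION_TANH)) return 1;
    if (gru_init(&model->rnn_a, arrays, "rnn_a_bias", "rnn_a_subias",
        "rnn_a_weights", "rnn_a_weights_idx", "rnn_a_recurrent_weights",
        512, 384, ACTIVATION_TANH, 1)) return 1;
    if (dense_init(&model->fc_out, arrays, "fc_out_bias", "fc_out_weights",
        384, 16, ACTIVATION_TANH)) return 1;
    return 0;
  }
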
--- a/dnn/write_lpcnet_weights.c
+++ b/dnn/write_lpcnet_weights.c
@@ -39,7 +39,7 @@
   unsigned char zeros[WEIGHT_BLOCK_SIZE] = {0};
   while (list[i].name != NULL) {
     WeightHead h;
-    strcpy(h.head, "DNNw");
+    memcpy(h.head, "DNNw", 4);
     h.version = WEIGHT_BLOB_VERSION;
     h.type = list[i].type;
     h.size = list[i].size;
--
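
The write_lpcnet_weights.c hunk swaps strcpy() for memcpy() when storing the
"DNNw" magic, presumably because h.head is a fixed four-byte field: strcpy()
would also store the terminating '\0', writing a fifth byte. A minimal
illustration (head is declared here only for the example):

  char head[4];
  memcpy(head, "DNNw", 4);   /* writes exactly the four magic bytes */
  /* strcpy(head, "DNNw");      would also write the trailing '\0' */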