shithub: opus

ref: 6a9831a6b038638266165dc1e9f115678f0b330e
parent: 1ddfcfd48cb87f8dc29240d705a4da78bae0eb50
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Thu Jan 18 13:16:54 EST 2024

Remove run-time code for old TF2 models

No longer needed now that the PLC model is trained with the PyTorch stack.
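
For reference, the generic LinearLayer path that replaces these helpers already exists in dnn/nnet.h. The sketch below is not part of this patch; the weight pointers and the ACTIVATION_TANH choice are illustrative placeholders. It shows how a caller of the removed _lpcnet_compute_dense() would populate a LinearLayer and call compute_generic_dense() instead. Callers of the removed compute_gruB() move to compute_generic_gru() the same way, passing one LinearLayer for the input weights and one for the recurrent weights.

#include "nnet.h"

/* Sketch only: equivalent of the removed _lpcnet_compute_dense(), built on the
 * generic LinearLayer / compute_generic_dense() API that remains after this
 * commit. Bias/weight pointers are assumed to come from the parsed weight
 * arrays. */
static void dense_via_generic(const float *bias, const float *float_weights,
                              int nb_inputs, int nb_neurons,
                              float *output, const float *input, int arch)
{
   LinearLayer matrix;
   matrix.bias = bias;
   matrix.subias = NULL;
   matrix.float_weights = float_weights;  /* unquantized float weights */
   matrix.weights = NULL;                 /* no quantized weights */
   matrix.weights_idx = NULL;             /* dense layer, no sparse index */
   matrix.diag = NULL;
   matrix.scale = NULL;
   matrix.nb_inputs = nb_inputs;
   matrix.nb_outputs = nb_neurons;
   /* One matrix-vector product followed by the activation, as the old
      helper did. */
   compute_generic_dense(&matrix, output, input, ACTIVATION_TANH, arch);
}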

--- a/dnn/nnet.c
+++ b/dnn/nnet.c
@@ -115,78 +115,6 @@
    }
 }
 
-void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch)
-{
-   LinearLayer matrix;
-   celt_assert(input != output);
-   matrix.bias = layer->bias;
-   matrix.subias = NULL;
-   matrix.float_weights = layer->input_weights;
-   matrix.weights = NULL;
-   matrix.weights_idx = NULL;
-   matrix.diag = NULL;
-   matrix.nb_inputs = layer->nb_inputs;
-   matrix.nb_outputs = layer->nb_neurons;
-   matrix.scale = NULL;
-   compute_linear(&matrix, output, input, arch);
-   compute_activation(output, output, layer->nb_neurons, layer->activation, arch);
-}
-
-#ifdef USE_SU_BIAS
-#define bias_type subias
-#else
-#define bias_type bias
-#endif
-#define MAX_IDX_SIZE 8192
-
-void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch)
-{
-  LinearLayer in_matrix, rec_matrix;
-  int i, M, N;
-  float bias[3*MAX_RNN_NEURONS_ALL];
-  float scale[3*MAX_RNN_NEURONS_ALL];
-  M = gru->nb_inputs;
-  N = gru->nb_neurons;
-
-  in_matrix.bias = bias;
-  in_matrix.diag = NULL;
-  in_matrix.nb_inputs = M;
-  in_matrix.nb_outputs = 3*N;
-  in_matrix.subias = bias;
-#ifdef DISABLE_DOT_PROD
-  for (i=0;i<3*N;i++) bias[i] = gru->bias[i] + gru_b_condition[i];
-  in_matrix.scale = NULL;
-  in_matrix.float_weights = gru->input_weights;
-  in_matrix.weights = NULL;
-#else
-  for (i=0;i<3*N;i++) bias[i] = gru->bias_type[i] + gru_b_condition[i];
-  for (i=0;i<3*N;i++) scale[i] = SCALE_1;
-  in_matrix.scale = scale;
-  in_matrix.weights = gru->input_weights;
-  in_matrix.float_weights = NULL;
-#endif
-  in_matrix.weights_idx = gru->input_weights_idx;
-
-  rec_matrix.bias = &gru->bias[3*N];
-  rec_matrix.diag = NULL;
-  rec_matrix.nb_inputs = N;
-  rec_matrix.nb_outputs = 3*N;
-  rec_matrix.scale = scale;
-  rec_matrix.subias = &gru->subias[3*N];
-#ifdef DISABLE_DOT_PROD
-  rec_matrix.scale = NULL;
-  rec_matrix.float_weights = gru->recurrent_weights;
-  rec_matrix.weights = NULL;
-#else
-  rec_matrix.scale = scale;
-  rec_matrix.weights = gru->recurrent_weights;
-  rec_matrix.float_weights = NULL;
-#endif
-  rec_matrix.weights_idx = NULL;
-  compute_generic_gru(&in_matrix, &rec_matrix, state, input, arch);
-}
-
-
 #define MAX_CONV_INPUTS_ALL DRED_MAX_CONV_INPUTS
 
 void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation, int arch)
--- a/dnn/nnet.h
+++ b/dnn/nnet.h
@@ -31,13 +31,6 @@
 #include <stddef.h>
 #include "opus_types.h"
 
-#ifdef DISABLE_DOT_PROD
-typedef float qweight;
-#else
-typedef signed char qweight;
-#define DOT_PROD
-#endif
-
 #define ACTIVATION_LINEAR  0
 #define ACTIVATION_SIGMOID 1
 #define ACTIVATION_TANH    2
@@ -91,41 +84,7 @@
   int kheight;
 } Conv2dLayer;
 
-typedef struct {
-  const float *bias;
-  const float *input_weights;
-  int nb_inputs;
-  int nb_neurons;
-  int activation;
-} DenseLayer;
 
-typedef struct {
-  const float *bias;
-  const float *subias;
-  const qweight *input_weights;
-  const int *input_weights_idx;
-  const qweight *recurrent_weights;
-  int nb_inputs;
-  int nb_neurons;
-  int activation;
-  int reset_after;
-} GRULayer;
-
-typedef struct {
-  const float *bias;
-  const float *input_weights;
-  int nb_inputs;
-  int kernel_size;
-  int nb_neurons;
-  int activation;
-} Conv1DLayer;
-
-typedef struct {
-  const float *embedding_weights;
-  int nb_inputs;
-  int dim;
-} EmbeddingLayer;
-
 void compute_generic_dense(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
 void compute_generic_gru(const LinearLayer *input_weights, const LinearLayer *recurrent_weights, float *state, const float *in, int arch);
 void compute_generic_conv1d(const LinearLayer *layer, float *output, float *mem, const float *input, int input_size, int activation, int arch);
@@ -134,10 +93,6 @@
 void compute_gated_activation(const LinearLayer *layer, float *output, const float *input, int activation, int arch);
 
 
-void _lpcnet_compute_dense(const DenseLayer *layer, float *output, const float *input, int arch);
-
-void compute_gruB(const GRULayer *gru, const float* gru_b_condition, float *state, const float *input, int arch);
-
 int parse_weights(WeightArray **list, const unsigned char *data, int len);
 
 
@@ -168,24 +123,6 @@
   int out_channels,
   int ktime,
   int kheight);
-
-int dense_init(DenseLayer *layer, const WeightArray *arrays,
-  const char *bias,
-  const char *input_weights,
-  int nb_inputs,
-  int nb_neurons,
-  int activation);
-
-int gru_init(GRULayer *layer, const WeightArray *arrays,
-  const char *bias,
-  const char *subias,
-  const char *input_weights,
-  const char *input_weights_idx,
-  const char *recurrent_weights,
-  int nb_inputs,
-  int nb_neurons,
-  int activation,
-  int reset_after);
 
 
 void compute_linear_c(const LinearLayer *linear, float *out, const float *in);
--- a/dnn/parse_lpcnet_weights.c
+++ b/dnn/parse_lpcnet_weights.c
@@ -176,46 +176,6 @@
   return 0;
 }
 
-
-int dense_init(DenseLayer *layer, const WeightArray *arrays,
-  const char *bias,
-  const char *input_weights,
-  int nb_inputs,
-  int nb_neurons,
-  int activation)
-{
-  if ((layer->bias = find_array_check(arrays, bias, nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
-  if ((layer->input_weights = find_array_check(arrays, input_weights, nb_inputs*nb_neurons*sizeof(layer->input_weights[0]))) == NULL) return 1;
-  layer->nb_inputs = nb_inputs;
-  layer->nb_neurons = nb_neurons;
-  layer->activation = activation;
-  return 0;
-}
-
-int gru_init(GRULayer *layer, const WeightArray *arrays,
-  const char *bias,
-  const char *subias,
-  const char *input_weights,
-  const char *input_weights_idx,
-  const char *recurrent_weights,
-  int nb_inputs,
-  int nb_neurons,
-  int activation,
-  int reset_after)
-{
-  int total_blocks;
-  if ((layer->bias = find_array_check(arrays, bias, 6*nb_neurons*sizeof(layer->bias[0]))) == NULL) return 1;
-  if ((layer->subias = find_array_check(arrays, subias, 6*nb_neurons*sizeof(layer->subias[0]))) == NULL) return 1;
-  if ((layer->input_weights_idx = find_idx_check(arrays, input_weights_idx, nb_inputs, 3*nb_neurons, &total_blocks)) == NULL) return 1;
-  if ((layer->input_weights = find_array_check(arrays, input_weights, SPARSE_BLOCK_SIZE*total_blocks*sizeof(layer->input_weights[0]))) == NULL) return 1;
-  if ((layer->recurrent_weights = find_array_check(arrays, recurrent_weights, 3*nb_neurons*nb_neurons*sizeof(layer->recurrent_weights[0]))) == NULL) return 1;
-  layer->nb_inputs = nb_inputs;
-  layer->nb_neurons = nb_neurons;
-  layer->activation = activation;
-  layer->reset_after = reset_after;
-  return 0;
-}
-
 int conv2d_init(Conv2dLayer *layer, const WeightArray *arrays,
   const char *bias,
   const char *float_weights,
--