shithub: opus

ref: 4698b283451541b8e8d2b3d4dd14476fea32e8b5
parent: d1811399301dfab6241bd0e89c19abc411210f0c
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Thu Jan 17 12:03:48 EST 2019

Making dump_lpcnet.py a bit more robust

Avoid relying on the order of the layers: look up 'gru_a' by name
instead of using a hard-coded index into model.layers.

--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -236,16 +236,16 @@
 embed_size = lpcnet.embed_size
 
 E = model.get_layer('embed_sig').get_weights()[0]
-W = model.layers[18].get_weights()[0][:embed_size,:]
+W = model.get_layer('gru_a').get_weights()[0][:embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_sig', np.dot(E, W), f, hf)
-W = model.layers[18].get_weights()[0][embed_size:2*embed_size,:]
+W = model.get_layer('gru_a').get_weights()[0][embed_size:2*embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_pred', np.dot(E, W), f, hf)
 E = model.get_layer('embed_exc').get_weights()[0]
-W = model.layers[18].get_weights()[0][2*embed_size:3*embed_size,:]
+W = model.get_layer('gru_a').get_weights()[0][2*embed_size:3*embed_size,:]
 dump_embedding_layer_impl('gru_a_embed_exc', np.dot(E, W), f, hf)
-W = model.layers[18].get_weights()[0][3*embed_size:,:]
+W = model.get_layer('gru_a').get_weights()[0][3*embed_size:,:]
 #FIXME: dump only half the biases
-b = model.layers[18].get_weights()[2]
+b = model.get_layer('gru_a').get_weights()[2]
 dump_dense_layer_impl('gru_a_dense_feature', W, b, 'LINEAR', f, hf)
 
 layer_list = []
--
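
For context, here is a minimal Keras sketch (not part of the patch) showing why
looking up a layer by name is more robust than indexing model.layers, and how
dump_lpcnet.py folds the signal embedding into the GRU input weights with
np.dot(E, W). The toy model, its sizes, and the layer index used below are
illustrative stand-ins, not the real LPCNet graph.

    import numpy as np
    from tensorflow import keras
    from tensorflow.keras import layers

    # Toy model; 'embed_sig' and 'gru_a' mirror the names used in
    # dump_lpcnet.py, but the sizes here are arbitrary.
    inp = keras.Input(shape=(None,), dtype='int32')
    x = layers.Embedding(256, 16, name='embed_sig')(inp)
    out = layers.GRU(8, name='gru_a')(x)
    model = keras.Model(inp, out)

    # Positional lookup breaks as soon as a layer is inserted or reordered;
    # name-based lookup does not.
    gru = model.get_layer('gru_a')          # robust
    # gru = model.layers[2]                 # fragile: depends on layer order

    E = model.get_layer('embed_sig').get_weights()[0]  # (256, 16) embedding table
    W = gru.get_weights()[0]                            # (16, 3*8) GRU input kernel

    # Pre-multiplying the embedding table by the GRU input kernel turns
    # embedding lookup + matmul into a single per-symbol lookup table,
    # which is what the dump_embedding_layer_impl() calls above rely on.
    lookup = np.dot(E, W)                               # (256, 3*8)
    print(lookup.shape)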