shithub: opus

ref: d961d009a0b19896fe3f959929ea626309b6e40c
parent: 4de3e53a737b9991343e0a24797844c2dfb7fe4f
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Wed Nov 28 15:20:17 EST 2018

Managing to actually use sparse matrices

Now 2x real-time!

--- a/dnn/dump_lpcnet.py
+++ b/dnn/dump_lpcnet.py
@@ -67,11 +67,19 @@
     A[:,N:2*N] = A[:,N:2*N] - np.diag(np.diag(A[:,N:2*N]))
     A[:,2*N:] = A[:,2*N:] - np.diag(np.diag(A[:,2*N:]))
     printVector(f, diag, name + '_diag')
+    idx = np.zeros((0,), dtype='int')
     for i in range(3*N//16):
+        pos = idx.shape[0]
+        idx = np.append(idx, -1)
+        nb_nonzero = 0
         for j in range(N):
-            W = np.concatenate([W, A[j, i*16:(i+1)*16]])
+            if np.sum(np.abs(A[j, i*16:(i+1)*16])) > 1e-10:
+                nb_nonzero = nb_nonzero + 1
+                idx = np.append(idx, j)
+                W = np.concatenate([W, A[j, i*16:(i+1)*16]])
+        idx[pos] = nb_nonzero
     printVector(f, W, name)
-    idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
+    #idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
     printVector(f, idx, name + '_idx', dtype='int')
     return;
 
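For reference, the dump above packs each 16-wide column block of the recurrent matrix as a count followed by the row indices of its non-zero 16-value strips (stored in idx), with the strips themselves appended to W in the same order. A minimal numpy sketch of reading that layout back into a dense matrix; the helper name and arguments are illustrative, not part of the patch:

    import numpy as np

    def sparse_to_dense(W, idx, rows, cols, block=16):
        # Rebuild a dense (rows, cols) matrix from the dumped layout:
        # per column block, idx holds [nb_nonzero, row, row, ...] and W holds
        # the matching 16-value strips back to back.
        A = np.zeros((rows, cols))
        w_pos = 0
        i_pos = 0
        for b in range(cols // block):
            nb_nonzero = idx[i_pos]
            i_pos += 1
            for _ in range(nb_nonzero):
                j = idx[i_pos]
                i_pos += 1
                A[j, b*block:(b+1)*block] = W[w_pos:w_pos+block]
                w_pos += block
        return A
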
--- a/dnn/lpcnet.py
+++ b/dnn/lpcnet.py
@@ -57,7 +57,7 @@
             pass
         else:
             #print("constrain");
-            layer = self.model.get_layer('cu_dnngru_1')
+            layer = self.model.get_layer('gru_a')
             w = layer.get_weights()
             p = w[1]
             nb = p.shape[1]//p.shape[0]
@@ -72,6 +72,7 @@
             for k in range(nb):
                 A = p[:, k*N:(k+1)*N]
                 A = A - np.diag(np.diag(A))
+                A = np.transpose(A, (1, 0))
                 L=np.reshape(A, (N, N//16, 16))
                 S=np.sum(L*L, axis=-1)
                 SS=np.sort(np.reshape(S, (-1,)))
@@ -79,6 +80,7 @@
                 mask = (S>=thresh).astype('float32');
                 mask = np.repeat(mask, 16, axis=1)
                 mask = np.minimum(1, mask + np.diag(np.ones((N,))))
+                mask = np.transpose(mask, (1, 0))
                 p[:, k*N:(k+1)*N] = p[:, k*N:(k+1)*N]*mask
                 #print(thresh, np.mean(mask))
             w[1] = p
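
The constraint above keeps, per gate, only the highest-energy 16-wide blocks of the recurrent matrix (the diagonal is always retained), and the added transposes orient those blocks the same way dump_lpcnet.py writes them out. A standalone sketch of the masking step; the density value and the quantile-based threshold are assumptions, since the threshold line sits outside this hunk:

    import numpy as np

    def block_sparsify(A, density=0.1, block=16):
        # Zero all but the highest-energy (1, block) runs of the NxN matrix A,
        # always keeping the diagonal, as in the constraint above.
        N = A.shape[0]
        B = A - np.diag(np.diag(A))            # rank blocks without the diagonal
        B = np.transpose(B, (1, 0))            # orient runs as the dump reads them
        L = np.reshape(B, (N, N//block, block))
        S = np.sum(L*L, axis=-1)               # energy of each block
        SS = np.sort(np.reshape(S, (-1,)))
        thresh = SS[int(round(S.size*(1-density)))]  # assumed quantile threshold
        mask = (S >= thresh).astype('float32')
        mask = np.repeat(mask, block, axis=1)
        mask = np.minimum(1, mask + np.diag(np.ones((N,))))  # always keep diagonal
        mask = np.transpose(mask, (1, 0))
        return A * mask
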
--- a/dnn/train_lpcnet.py
+++ b/dnn/train_lpcnet.py
@@ -139,7 +139,7 @@
 in_data = np.concatenate([in_data, pred], axis=-1)
 
 # dump models to disk as we go
-checkpoint = ModelCheckpoint('lpcnet9_384_10_G16_{epoch:02d}.h5')
+checkpoint = ModelCheckpoint('lpcnet9b_384_10_G16_{epoch:02d}.h5')
 
 #model.load_weights('wavenet4f2_30.h5')
 model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
--
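
For context, the checkpoint and compile lines in train_lpcnet.py follow the stock Keras pattern of saving one weight file per epoch. A tiny self-contained sketch of that wiring; the toy model, data and filename are placeholders, not the LPCNet model:

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.callbacks import ModelCheckpoint
    from keras.optimizers import Adam

    # Toy stand-in for the LPCNet model, only to show the checkpoint/compile wiring.
    model = Sequential([Dense(4, activation='softmax', input_shape=(8,))])
    checkpoint = ModelCheckpoint('toy_{epoch:02d}.h5')   # one file per epoch
    model.compile(optimizer=Adam(0.001, amsgrad=True, decay=5e-5),
                  loss='sparse_categorical_crossentropy',
                  metrics=['sparse_categorical_accuracy'])
    x = np.random.rand(64, 8)
    y = np.random.randint(0, 4, (64, 1))
    model.fit(x, y, epochs=2, callbacks=[checkpoint])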