ref: d61f7e00f889c749a75057eb16038ded713772dc
parent: ca0a43bee928fffa5057c8f9a46d584324850da5
author: Jean-Marc Valin <jmvalin@amazon.com>
date: Fri Jun 25 09:43:37 EDT 2021
Fix missing transpose in the sparsity code

CuDNNGRU and GRU don't use the same weight format.
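For reference, a minimal self-contained sketch of the block-sparsity masking for a single gate, showing where the transpose matters. The values of N and density, the random weights, and the variable names are illustrative only; the real code in lpcnet.py loops over the three GRU gates of the recurrent weight matrix:

    import numpy as np

    N = 64          # gate size (assumed divisible by 8); the real code derives this from the GRU units
    density = 0.25  # fraction of 4x8 blocks to keep (illustrative value)
    rng = np.random.default_rng(0)
    p = rng.standard_normal((N, N)).astype('float32')  # stand-in for one gate's recurrent weights

    A = p - np.diag(np.diag(p))   # the diagonal is always preserved
    # CuDNNGRU stores recurrent weights transposed relative to GRU, so the
    # 4x8 blocks must be scored in the transposed orientation.
    A = np.transpose(A, (1, 0))
    L = np.reshape(A, (N//4, 4, N//8, 8))
    S = np.sum(L*L, axis=-1)      # energy of each 4x8 block
    S = np.sum(S, axis=1)
    SS = np.sort(np.reshape(S, (-1,)))
    thresh = SS[int(round(SS.size*(1 - density)))]
    mask = (S >= thresh).astype('float32')
    mask = np.repeat(mask, 4, axis=0)
    mask = np.repeat(mask, 8, axis=1)
    mask = np.minimum(1, mask + np.diag(np.ones((N,))))
    # Transpose the mask back so it lines up with the untransposed weights.
    mask = np.transpose(mask, (1, 0))
    p = p * mask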
--- a/dnn/training_tf2/lpcnet.py
+++ b/dnn/training_tf2/lpcnet.py
@@ -82,7 +82,8 @@
density = 1 - (1-self.final_density[k])*(1 - r*r*r)
A = p[:, k*N:(k+1)*N]
A = A - np.diag(np.diag(A))
- #A = np.transpose(A, (1, 0))
+ #This is needed because CuDNNGRU uses a different weight ordering than GRU
+ A = np.transpose(A, (1, 0))
L=np.reshape(A, (N//4, 4, N//8, 8))
S=np.sum(L*L, axis=-1)
S=np.sum(S, axis=1)
@@ -92,7 +93,8 @@
mask = np.repeat(mask, 4, axis=0)
mask = np.repeat(mask, 8, axis=1)
mask = np.minimum(1, mask + np.diag(np.ones((N,))))
- #mask = np.transpose(mask, (1, 0))
+ #This is needed because CuDNNGRU uses a different weight ordering than GRU
+ mask = np.transpose(mask, (1, 0))
p[:, k*N:(k+1)*N] = p[:, k*N:(k+1)*N]*mask
#print(thresh, np.mean(mask))
w[1] = p
--