ref: dc539a9ce90a2ba7e042425a39d0a640ca6a5f87
parent: 2e18f0d160e4d5d8e786ef4481ac1702d0ce3ad3
author: Jean-Marc Valin <jmvalin@amazon.com>
date: Sat Feb 5 12:05:31 EST 2022
WIP non-causal PLC
--- a/dnn/lpcnet_demo.c
+++ b/dnn/lpcnet_demo.c
@@ -141,6 +141,7 @@
ret = fread(pcm, sizeof(pcm[0]), FRAME_SIZE, fin);
if (feof(fin) || ret != FRAME_SIZE) break;
if (count % 2 == 0) loss = rand() < RAND_MAX*(float)plc_percent/100.f;
+ //if (count % 2 == 0) scanf("%d", &loss);
if (loss) lpcnet_plc_conceal(net, pcm);
else lpcnet_plc_update(net, pcm);
fwrite(pcm, sizeof(pcm[0]), FRAME_SIZE, fout);
--- a/dnn/lpcnet_plc.c
+++ b/dnn/lpcnet_plc.c
@@ -70,6 +70,7 @@
_lpcnet_compute_dense(&plc_out, out, net->plc_gru2_state);
}
+#if 0
LPCNET_EXPORT int lpcnet_plc_update(LPCNetPLCState *st, short *pcm) {
int i;
float x[FRAME_SIZE];
@@ -196,3 +197,100 @@
st->blend = 1;
return 0;
}
+
+#else
+
+LPCNET_EXPORT int lpcnet_plc_update(LPCNetPLCState *st, short *pcm) {
+ int i;
+ float x[FRAME_SIZE];
+ short pcm_save[FRAME_SIZE];
+ float plc_features[2*NB_BANDS+NB_FEATURES+1];
+ RNN_COPY(pcm_save, pcm, FRAME_SIZE);
+ for (i=0;i<FRAME_SIZE;i++) x[i] = pcm[i];
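+ /* Burg analysis of the new frame; these features stay valid even right after a loss, when the regular features are not. */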
+ burg_cepstral_analysis(plc_features, x);
+ st->enc.pcount = 0;
+ if (st->loss_count > 0) {
+ LPCNetState copy;
+ /* Loss just ended: generate one more concealed frame into the buffer, then blend the received frame with a synthesized continuation. */
+ short tmp[FRAME_SIZE-TRAINING_OFFSET];
+ float zeros[2*NB_BANDS+NB_FEATURES+1] = {0};
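+ /* Only the Burg features of the received frame are known here; the last element flags the frame as received. */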
+ RNN_COPY(zeros, plc_features, 2*NB_BANDS);
+ zeros[2*NB_BANDS+NB_FEATURES] = 1;
+ compute_plc_pred(&st->plc_net, st->features, zeros);
+ lpcnet_synthesize_tail_impl(&st->lpcnet, st->pcm, FRAME_SIZE-TRAINING_OFFSET, 0);
+ lpcnet_synthesize_impl(&st->lpcnet, st->features, &st->pcm[FRAME_SIZE-TRAINING_OFFSET], TRAINING_OFFSET, 0);
+
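+ /* Generate a throw-away continuation on a copy of the state, used only for cross-fading. */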
+ copy = st->lpcnet;
+ lpcnet_synthesize_tail_impl(&st->lpcnet, tmp, FRAME_SIZE-TRAINING_OFFSET, 0);
+ st->lpcnet = copy;
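+ /* Raised-cosine cross-fade from the synthesized continuation into the decoded audio. */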
+ for (i=0;i<FRAME_SIZE-TRAINING_OFFSET;i++) {
+ float w;
+ w = .5 - .5*cos(M_PI*i/(FRAME_SIZE-TRAINING_OFFSET));
+ pcm_save[i] = (int)floor(.5 + w*pcm_save[i] + (1-w)*tmp[i]);
+ }
+
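+ /* Run the feature extractor on the concealed frame so the encoder state stays consistent. */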
+ for (i=0;i<FRAME_SIZE;i++) x[i] = st->pcm[i];
+ preemphasis(x, &st->enc.mem_preemph, x, PREEMPHASIS, FRAME_SIZE);
+ compute_frame_features(&st->enc, x);
+ process_single_frame(&st->enc, NULL);
+
+ }
+ for (i=0;i<FRAME_SIZE;i++) x[i] = pcm[i];
+ preemphasis(x, &st->enc.mem_preemph, x, PREEMPHASIS, FRAME_SIZE);
+ compute_frame_features(&st->enc, x);
+ process_single_frame(&st->enc, NULL);
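+ /* No loss in progress: predict from the full features and run the synthesizer teacher-forced over the buffered frame. */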
+ if (st->loss_count == 0) {
+ RNN_COPY(&plc_features[2*NB_BANDS], st->enc.features[0], NB_FEATURES);
+ plc_features[2*NB_BANDS+NB_FEATURES] = 1;
+ compute_plc_pred(&st->plc_net, st->features, plc_features);
+ lpcnet_synthesize_tail_impl(&st->lpcnet, st->pcm, FRAME_SIZE-TRAINING_OFFSET, FRAME_SIZE-TRAINING_OFFSET);
+ lpcnet_synthesize_impl(&st->lpcnet, st->enc.features[0], &st->pcm[FRAME_SIZE-TRAINING_OFFSET], TRAINING_OFFSET, TRAINING_OFFSET);
+ }
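+ /* One frame of delay: output the previously buffered frame, then buffer the new (possibly blended) one. */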
+ RNN_COPY(pcm, st->pcm, FRAME_SIZE);
+ RNN_COPY(st->pcm, pcm_save, FRAME_SIZE);
+ st->loss_count = 0;
+ return 0;
+}
+
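+/* Attenuation schedule applied to the predicted features as a loss persists. */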
+static const float att_table[10] = {0, 0, -.2, -.2, -.4, -.4, -.8, -.8, -1.6, -1.6};
+LPCNET_EXPORT int lpcnet_plc_conceal(LPCNetPLCState *st, short *pcm) {
+ int i;
+ float x[FRAME_SIZE];
+ float zeros[2*NB_BANDS+NB_FEATURES+1] = {0};
+ st->enc.pcount = 0;
+
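+ /* Nothing was received: predict the features from an all-zero input. */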
+ compute_plc_pred(&st->plc_net, st->features, zeros);
+ if (st->loss_count >= 10) st->features[0] = MAX16(-10, st->features[0]+att_table[9] - 2*(st->loss_count-9));
+ else st->features[0] = MAX16(-10, st->features[0]+att_table[st->loss_count]);
+ if (st->loss_count > 4) st->features[NB_FEATURES-1] = MAX16(-.5, st->features[NB_FEATURES-1]-.1*(st->loss_count-4));
+
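+ /* First lost frame: play out the buffered frame while running the synthesizer teacher-forced over it. */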
+ if (st->loss_count == 0) {
+ RNN_COPY(pcm, st->pcm, FRAME_SIZE);
+ lpcnet_synthesize_tail_impl(&st->lpcnet, st->pcm, FRAME_SIZE-TRAINING_OFFSET, FRAME_SIZE-TRAINING_OFFSET);
+ lpcnet_synthesize_impl(&st->lpcnet, st->features, &st->pcm[FRAME_SIZE-TRAINING_OFFSET], TRAINING_OFFSET, TRAINING_OFFSET);
+ } else {
+ lpcnet_synthesize_tail_impl(&st->lpcnet, pcm, FRAME_SIZE-TRAINING_OFFSET, 0);
+ lpcnet_synthesize_impl(&st->lpcnet, st->features, &pcm[FRAME_SIZE-TRAINING_OFFSET], TRAINING_OFFSET, 0);
+
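+ /* Keep the feature extractor in sync with the synthesized audio. */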
+ for (i=0;i<FRAME_SIZE;i++) x[i] = pcm[i];
+ preemphasis(x, &st->enc.mem_preemph, x, PREEMPHASIS, FRAME_SIZE);
+ compute_frame_features(&st->enc, x);
+ process_single_frame(&st->enc, NULL);
+ }
+
+ st->loss_count++;
+ return 0;
+}
+
+#endif
--- a/dnn/training_tf2/plc_loader.py
+++ b/dnn/training_tf2/plc_loader.py
@@ -51,9 +51,16 @@
lost = self.lost_offset[self.lost_indices[index*self.batch_size:(index+1)*self.batch_size], :]
lost = np.reshape(lost, (features.shape[0], features.shape[1], 1))
lost_mask = np.tile(lost, (1,1,features.shape[2]))
+ in_features = features*lost_mask
+
+ #For the first frame after a loss, we don't have valid features, but the Burg estimate is valid.
+ in_features[:,1:,self.nb_burg_features:] = in_features[:,1:,self.nb_burg_features:]*lost_mask[:,:-1,self.nb_burg_features:]
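+ #Likewise, treat the first frame after a loss as lost in the output mask used for loss weighting.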
+ out_lost = np.copy(lost)
+ out_lost[:,1:,:] = out_lost[:,1:,:]*out_lost[:,:-1,:]
- out_features = np.concatenate([features[:,:,self.nb_burg_features:], 1.-lost], axis=-1)
- inputs = [features*lost_mask, lost]
+ out_features = np.concatenate([features[:,:,self.nb_burg_features:], 1.-out_lost], axis=-1)
+ inputs = [in_features, lost]
outputs = [out_features]
return (inputs, outputs)
--- a/dnn/training_tf2/train_plc.py
+++ b/dnn/training_tf2/train_plc.py
@@ -49,6 +49,7 @@
parser.add_argument('--decay', metavar='<decay>', type=float, help='learning rate decay')
parser.add_argument('--band-loss', metavar='<weight>', default=1.0, type=float, help='weight of band loss (default 1.0)')
parser.add_argument('--loss-bias', metavar='<bias>', default=0.0, type=float, help='loss bias towards low energy (default 0.0)')
+parser.add_argument('--non-causal', dest='non_causal', action='store_true', help='train non-causal model')
parser.add_argument('--logdir', metavar='<log dir>', help='directory for tensorboard log files')
@@ -97,11 +98,19 @@
if retrain:
input_model = args.retrain
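+# Causal (delayed) model: compare each prediction with the target one frame later; non-causal: align them directly.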
+delay = not args.non_causal
+
def plc_loss(alpha=1.0, bias=0.):
def loss(y_true,y_pred):
- mask = .2 + .8*y_true[:,1:,-1:]
- y_true = y_true[:,1:,:-1]
- y_pred = y_pred[:,:-1,:]
+ if delay:
+ mask = .2 + .8*y_true[:,1:,-1:]
+ y_true = y_true[:,1:,:-1]
+ y_pred = y_pred[:,:-1,:]
+ else:
+ mask = y_true[:,:,-1:]
+ y_true = y_true[:,:,:-1]
+
e = (y_pred - y_true)*mask
e_bands = tf.signal.idct(e[:,:,:-2], norm='ortho')
l1_loss = K.mean(K.abs(e)) + bias*K.mean(K.maximum(e[:,:,:1], 0.)) + alpha*K.mean(K.abs(e_bands) + bias*K.maximum(e_bands, 0.))
@@ -110,9 +118,13 @@
def plc_l1_loss():
def L1_loss(y_true,y_pred):
- mask = y_true[:,1:,-1:]
- y_true = y_true[:,1:,:-1]
- y_pred = y_pred[:,:-1,:]
+ if delay:
+ mask = y_true[:,1:,-1:]
+ y_true = y_true[:,1:,:-1]
+ y_pred = y_pred[:,:-1,:]
+ else:
+ mask = y_true[:,:,-1:]
+ y_true = y_true[:,:,:-1]
e = (y_pred - y_true)*mask
l1_loss = K.mean(K.abs(e))
return l1_loss
@@ -121,8 +133,12 @@
def plc_band_loss():
def L1_band_loss(y_true,y_pred):
- mask = y_true[:,1:,-1:]
- y_true = y_true[:,1:,:-1]
- y_pred = y_pred[:,:-1,:]
+ if delay:
+ mask = y_true[:,1:,-1:]
+ y_true = y_true[:,1:,:-1]
+ y_pred = y_pred[:,:-1,:]
+ else:
+ mask = y_true[:,:,-1:]
+ y_true = y_true[:,:,:-1]
e = (y_pred - y_true)*mask
e_bands = tf.signal.idct(e[:,:,:-2], norm='ortho')
l1_loss = K.mean(K.abs(e_bands))
--