ref: 4dbca378d69a45170b6f44d67d9c81d704af5827
parent: 1cb7a8ce327f282796f022d51503b0a5b92786cf
parent: c065dd9cc39be75c6ba33d2aa36111ee25689c87
author: eli <eli@singularity>
date: Tue Dec 24 11:47:09 EST 2024
merge
--- a/anagrams.c
+++ b/anagrams.c
@@ -122,6 +122,9 @@
dict = calloc(1, sizeof(dlist));
+ if (argc < 2)
+ goto USAGE;
+
for(i = 1; i < argc; i++){
wordsfilename = argv[i];
if(argv[i][0] == '-'){
@@ -134,7 +137,8 @@
}
}else continue;
- fprint(2, "usage:\n\t%s [-l[ength] minlen] [-c[ommonenglishwords] wordsfile\n", argv[0]);
+USAGE:
+ fprint(2, "usage: %s [-l[ength] minlen] [-c[ommonenglishwords] wordsfile\n", argv[0]);
exits("usage");
}
--- /dev/null
+++ b/ann.c
@@ -1,0 +1,838 @@
+#include <u.h>
+#include <libc.h>
+#include <ctype.h>
+
+// NOTE(review): Plan 9 libc has no RAND_MAX; this assumes rand() spans
+// [0, 0xFFFF] — confirm against the local rand() (a 15-bit rand() would
+// halve the effective scale in weightsinitrandscale()).
+#define RAND_MAX 0xFFFF
+// Fully parenthesized to avoid operator-precedence surprises at the use
+// site; a and b are still evaluated twice, so keep side effects out.
+#define fmax(a,b) ((a) > (b)? (a): (b))
+
+typedef struct Ann Ann;
+typedef struct Layer Layer;
+typedef struct Neuron Neuron;
+typedef struct Weights Weights;
+
+/* A fully connected feed-forward neural network. */
+struct Ann {
+	int n;			/* number of layers, input and output included */
+	double rate;		/* learning rate used by anntrain() */
+	Layer **layers;		/* the n layers */
+	Weights **weights;	/* n-1 matrices between adjacent layers */
+	Weights **deltas;	/* n-1 scratch matrices for backpropagation */
+	void *user;		/* reserved for the caller */
+	void *internal;		/* optimizer state (Adam), created lazily */
+};
+
+/* One layer: n neurons plus a trailing bias neuron at index n. */
+struct Layer {
+	int n;
+	Neuron **neurons;
+};
+
+struct Neuron {
+	double (*activation)(Neuron*);	/* maps sum to value */
+	double (*gradient)(Neuron*);	/* activation derivative for backprop */
+	double steepness;		/* scales the gradient (1.0 everywhere here) */
+	double value;			/* output; bias neurons keep the initial 1.0 */
+	double sum;			/* weighted input sum before activation */
+	void *user;
+	void *internal;
+};
+
+/* (inputs+1) x outputs matrix; the extra row at index inputs holds biases. */
+struct Weights {
+	int inputs;
+	int outputs;
+	double **values;
+};
+
+double activation_sigmoid(Neuron*);
+double gradient_sigmoid(Neuron*);
+double activation_tanh(Neuron*);
+double gradient_tanh(Neuron*);
+double activation_leaky_relu(Neuron*);
+double gradient_leaky_relu(Neuron*);
+
+#define ACTIVATION activation_leaky_relu
+#define GRADIENT gradient_leaky_relu
+
+Ann *anncreate(int, ...);
+Ann *anncreatev(int, int*);
+Layer *layercreate(int, double(*)(Neuron*), double(*)(Neuron*));
+Neuron *neuroninit(Neuron*, double (*)(Neuron*), double (*)(Neuron*), double);
+Neuron *neuroncreate(double (*)(Neuron*), double (*)(Neuron*), double);
+Weights *weightsinitrand(Weights*);
+Weights *weightsinitrandscale(Weights*, double);
+Weights *weightsinitdouble(Weights*, double);
+Weights *weightsinitdoubles(Weights*, double*);
+Weights *weightscreate(int, int, int);
+double *annrun(Ann*, double*);
+double anntrain(Ann*, double*, double*);
+
+typedef struct Adam Adam;
+
+/* Per-network optimizer state for anntrain_adam()/anntrain_adamax(). */
+struct Adam {
+	double rate;		/* step size (0.001 adam, 0.002 adamax) */
+	double beta1;		/* decay of the first-moment estimate */
+	double beta2;		/* decay of the second-moment estimate */
+	Weights **first;	/* first moments, one matrix per weight matrix */
+	Weights **second;	/* second moments (infinity norm for adamax) */
+	double epsilon;		/* guards the division in the adam update */
+	int timestep;		/* training steps taken so far */
+};
+
+double anntrain_adam(Ann*, double*, double*);
+double anntrain_adamax(Ann*, double*, double*);
+
+/* Logistic sigmoid 1/(1+e^-x) of the neuron's input sum. */
+double
+activation_sigmoid(Neuron *in)
+{
+	return 1.0/(1.0+exp(-in->sum));
+}
+
+/* Sigmoid derivative y*(1-y), computed from the cached output value. */
+double
+gradient_sigmoid(Neuron *in)
+{
+	double y = in->value;
+	return y * (1.0 - y);
+}
+
+/* Hyperbolic tangent of the neuron's input sum. */
+double
+activation_tanh(Neuron *in)
+{
+	return tanh(in->sum);
+}
+
+/* tanh derivative 1-y^2, computed from the cached output value. */
+double
+gradient_tanh(Neuron *in)
+{
+	return 1.0 - in->value*in->value;
+}
+
+/* Leaky ReLU: identity for positive sums, slope 0.01 below zero. */
+double
+activation_leaky_relu(Neuron *in)
+{
+	double x = in->sum;
+
+	return x > 0? x: x * 0.01;
+}
+
+/* Leaky ReLU derivative: 1 above zero, 0.01 otherwise. */
+double
+gradient_leaky_relu(Neuron *in)
+{
+	return in->sum > 0? 1.0: 0.01;
+}
+
+/*
+ * Set every row of in — the bias row at index inputs included — to the
+ * per-output values in init; init must hold at least in->outputs doubles.
+ * Returns in for chaining.
+ */
+Weights*
+weightsinitdoubles(Weights *in, double *init)
+{
+	int i, o;
+
+	for (i = 0; i <= in->inputs; i++)
+		for (o = 0; o < in->outputs; o++)
+			in->values[i][o] = init[o];
+
+	return in;
+}
+
+/* Fill the whole matrix, bias row included, with one constant. */
+Weights*
+weightsinitdouble(Weights *in, double init)
+{
+	int row, col;
+
+	for (row = in->inputs; row >= 0; row--)
+		for (col = in->outputs - 1; col >= 0; col--)
+			in->values[row][col] = init;
+
+	return in;
+}
+
+/*
+ * Fill the matrix with uniform random values in (-scale/2, scale/2).
+ * The RNG is seeded from the clock only once per process: the original
+ * reseeded on every call, so two matrices created within the same
+ * second came out identical.
+ */
+Weights*
+weightsinitrandscale(Weights *in, double scale)
+{
+	static int seeded = 0;
+	int i, o;
+
+	if (!seeded) {
+		srand(time(0));
+		seeded = 1;
+	}
+	for (i = 0; i <= in->inputs; i++)
+		for (o = 0; o < in->outputs; o++)
+			in->values[i][o] = (((double)rand()/RAND_MAX) - 0.5) * scale;
+
+	return in;
+}
+
+/* Default random initialization: values in (-1, 1). */
+Weights*
+weightsinitrand(Weights *in)
+{
+	return weightsinitrandscale(in, 2.0);
+}
+
+/*
+ * (Re)initialize an existing neuron.  value starts at 1.0 so the extra
+ * neuron appended to each layer can serve as a constant bias input.
+ * Returns in for chaining.
+ */
+Neuron*
+neuroninit(Neuron *in, double (*activation)(Neuron*), double (*gradient)(Neuron*), double steepness)
+{
+	in->activation = activation;
+	in->gradient = gradient;
+	in->steepness = steepness;
+	in->value = 1.0;
+	in->sum = 0;
+	return in;
+}
+
+/*
+ * Allocate and initialize a neuron; returns nil on allocation failure
+ * (the original dereferenced the unchecked calloc result).
+ */
+Neuron*
+neuroncreate(double (*activation)(Neuron*), double (*gradient)(Neuron*), double steepness)
+{
+	Neuron *ret = calloc(1, sizeof(Neuron));
+	if (ret == nil)
+		return nil;
+	return neuroninit(ret, activation, gradient, steepness);
+}
+
+/*
+ * Create a layer of num_neurons neurons plus one extra bias neuron at
+ * index num_neurons (its value is fixed at 1.0 by neuroninit and never
+ * overwritten by annrun).
+ */
+Layer*
+layercreate(int num_neurons, double(*activation)(Neuron*), double(*gradient)(Neuron*))
+{
+	Layer *ret = calloc(1, sizeof(Layer));
+	int i;
+
+	ret->n = num_neurons;
+	ret->neurons = calloc(num_neurons+1, sizeof(Neuron*));
+	for (i = 0; i <= ret->n; i++) {
+		ret->neurons[i] = neuroncreate(activation, gradient, 1.0);
+	}
+	return ret;
+}
+
+/*
+ * Allocate an (inputs+1) x outputs weight matrix; the extra row holds
+ * the biases.  With initialize nonzero the values are randomized in
+ * (-1, 1), otherwise they stay zeroed from calloc.
+ */
+Weights*
+weightscreate(int inputs, int outputs, int initialize)
+{
+	int i;
+	Weights *ret = calloc(1, sizeof(Weights));
+	ret->inputs = inputs;
+	ret->outputs = outputs;
+	ret->values = calloc(inputs+1, sizeof(double*));
+	for (i = 0; i <= inputs; i++)
+		ret->values[i] = calloc(outputs, sizeof(double));
+	if (initialize)
+		weightsinitrand(ret);
+	return ret;
+}
+
+/*
+ * Build a network from num_layers variadic layer sizes.  Sizes outside
+ * [0, 1000000] are clamped to 0.  Every layer, output included, gets
+ * the default activation (leaky ReLU).  NOTE(review): anncreatev()
+ * instead puts a sigmoid on the output layer — confirm which behavior
+ * is intended.
+ */
+Ann*
+anncreate(int num_layers, ...)
+{
+	Ann *ret = calloc(1, sizeof(Ann));
+	va_list args;
+	int arg;
+	int i;
+
+	va_start(args, num_layers);
+	ret->n = num_layers;
+	ret->rate = 0.7;
+	ret->layers = calloc(num_layers, sizeof(Layer*));
+	ret->weights = calloc(num_layers-1, sizeof(Weights*));
+	ret->deltas = calloc(num_layers-1, sizeof(Weights*));
+
+	for (i = 0; i < num_layers; i++) {
+		arg = va_arg(args, int);
+		if (arg < 0 || arg > 1000000)
+			arg = 0;
+		ret->layers[i] = layercreate(arg, ACTIVATION, GRADIENT);
+		if (i > 0) {
+			/* weights/deltas sit between layer i-1 and layer i */
+			ret->weights[i-1] = weightscreate(ret->layers[i-1]->n, ret->layers[i]->n, 1);
+			ret->deltas[i-1] = weightscreate(ret->layers[i-1]->n, ret->layers[i]->n, 0);
+		}
+	}
+
+	va_end(args);
+
+	return ret;
+}
+
+/*
+ * Like anncreate() but takes the layer sizes as an array.  Hidden
+ * layers use the default activation; the output layer uses a sigmoid,
+ * squashing outputs into (0, 1).  This is the constructor loadann()
+ * and main() use.
+ */
+Ann*
+anncreatev(int num_layers, int *layers)
+{
+	Ann *ret = calloc(1, sizeof(Ann));
+	int arg;
+	int i;
+
+	ret->n = num_layers;
+	ret->rate = 0.7;
+	ret->layers = calloc(num_layers, sizeof(Layer*));
+	ret->weights = calloc(num_layers-1, sizeof(Weights*));
+	ret->deltas = calloc(num_layers-1, sizeof(Weights*));
+
+	for (i = 0; i < num_layers; i++) {
+		arg = layers[i];
+		if (arg < 0 || arg > 1000000)
+			arg = 0;
+		if (i < (num_layers-1))
+			ret->layers[i] = layercreate(arg, ACTIVATION, GRADIENT);
+		else
+			ret->layers[i] = layercreate(arg, activation_sigmoid, gradient_sigmoid);
+		if (i > 0) {
+			ret->weights[i-1] = weightscreate(ret->layers[i-1]->n, ret->layers[i]->n, 1);
+			ret->deltas[i-1] = weightscreate(ret->layers[i-1]->n, ret->layers[i]->n, 0);
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Forward pass.  Copies input (layers[0]->n values) into the input
+ * layer, then for each later layer computes every neuron's weighted
+ * sum — seeded with the bias stored at row `inputs` of the weight
+ * matrix — and applies its activation.  Returns a freshly allocated
+ * array of the output layer's values; the caller must free it.
+ */
+double*
+annrun(Ann *ann, double *input)
+{
+	int l, i, o;
+	int outputs = ann->layers[ann->n - 1]->n;
+	double *ret = calloc(outputs, sizeof(double));
+	Neuron *O;
+	double sum;
+
+	for (i = 0; i < ann->layers[0]->n; i++)
+		ann->layers[0]->neurons[i]->value = input[i];
+
+	for (l = 1; l < ann->n; l++) {
+		for (o = 0; o < ann->layers[l]->n; o++) {
+			O = ann->layers[l]->neurons[o];
+			O->sum = ann->weights[l-1]->values[ann->weights[l-1]->inputs][o]; // bias
+			sum = O->sum;
+			#pragma omp parallel for reduction (+:sum)
+			for (i = 0; i < ann->layers[l-1]->n; i++)
+				sum += ann->layers[l-1]->neurons[i]->value * ann->weights[l-1]->values[i][o];
+			O->sum = sum;
+			O->value = O->activation(O);
+		}
+	}
+
+	for (o = 0; o < outputs; o++)
+		ret[o] = ann->layers[ann->n - 1]->neurons[o]->value;
+
+	return ret;
+}
+
+/*
+ * One step of gradient-descent training: forward pass, error transform,
+ * backpropagation through the delta matrices, then a weight update
+ * scaled by ann->rate.  Returns this sample's summed half squared error
+ * (computed before the transform below).
+ */
+double
+anntrain(Ann *ann, double *inputs, double *outputs)
+{
+	double *error = annrun(ann, inputs);
+	double ret = 0.0;
+	int noutputs = ann->layers[ann->n-1]->n;
+	double acc, sum;
+	int o, i, w, n;
+	Neuron *O, *I;
+	Weights *W, *D, *D2;
+
+	for (o = 0; o < noutputs; o++) {
+		// error = outputs[o] - result
+		error[o] -= outputs[o];
+		error[o] = -error[o];
+		ret += pow(error[o], 2.0) * 0.5;
+		// log((1+e)/(1-e)) = 2*atanh(e): magnifies errors near +-1,
+		// clamped to +-17 to dodge the singularity at |e| = 1.
+		if (error[o] < -.9999999)
+			error[o] = -17.0;
+		else if (error[o] > .9999999)
+			error[o] = 17.0;
+		else
+			error[o] = log((1.0 + error[o]) / (1.0 - error[o]));
+	}
+	// seed the output-layer deltas with the transformed error; all
+	// earlier delta matrices start at 1.0 and are scaled in place below
+	D = ann->deltas[ann->n-2];
+	weightsinitdoubles(D, error);
+	for (i = 0; i < (ann->n-2); i++) {
+		D = ann->deltas[i];
+		weightsinitdouble(D, 1.0);
+	}
+
+	// backpropagate MSE
+	D2 = ann->deltas[ann->n-2];
+	for (w = ann->n-2; w >= 0; w--) {
+		D = ann->deltas[w];
+
+		for (o = 0; o < ann->layers[w+1]->n; o++) {
+			O = ann->layers[w+1]->neurons[o];
+			acc = O->gradient(O) * O->steepness;
+			sum = 1.0;
+			if (D2 != D) {
+				// hidden layer: weight by the downstream deltas
+				W = ann->weights[w + 1];
+				sum = 0.0;
+				#pragma omp parallel for reduction (+:sum)
+				for (n = 0; n < D2->outputs; n++)
+					sum += D2->values[o][n] * W->values[o][n];
+			}
+			for (i = 0; i <= ann->layers[w]->n; i++) {
+				D->values[i][o] *= acc * sum;
+			}
+		}
+
+		D2 = D;
+	}
+
+	// update weights
+	for (w = 0; w < ann->n-1; w++) {
+		W = ann->weights[w];
+		D = ann->deltas[w];
+
+		for (i = 0; i <= W->inputs; i++) {
+			I = ann->layers[w]->neurons[i];
+			for (o = 0; o < W->outputs; o++) {
+				W->values[i][o] += D->values[i][o] * ann->rate * I->value;
+			}
+		}
+	}
+
+	free(error);
+	return ret;
+}
+
+/*
+ * Allocate Adam optimizer state — default hyperparameters plus one
+ * first- and one second-moment matrix per weight matrix, zeroed — and
+ * attach it to ann->internal.  Called lazily on the first adam/adamax
+ * training step.  Returns ann.
+ */
+Ann*
+adaminit(Ann *ann)
+{
+	int i;
+	Adam *I = calloc(1, sizeof(Adam));
+
+	I->rate = 0.001;
+	I->beta1 = 0.9;
+	I->beta2 = 0.999;
+	I->epsilon = 10e-8;
+	I->timestep = 0;
+	I->first = calloc(ann->n-1, sizeof(Weights*));
+	I->second = calloc(ann->n-1, sizeof(Weights*));
+
+	for (i = 0; i < (ann->n-1); i++) {
+		I->first[i] = weightscreate(ann->layers[i]->n, ann->layers[i+1]->n, 0);
+		I->second[i] = weightscreate(ann->layers[i]->n, ann->layers[i+1]->n, 0);
+	}
+
+	ann->internal = I;
+
+	return ann;
+}
+
+/*
+ * One training step using the Adam optimizer.  Backpropagation mirrors
+ * anntrain() (minus the atanh error transform); per-weight first (M)
+ * and second (V) moment estimates smooth the updates.  Optimizer state
+ * is created on first use.  Returns the summed half squared error.
+ */
+double
+anntrain_adam(Ann *ann, double *inputs, double *outputs)
+{
+	double *error = annrun(ann, inputs);
+	double ret = 0.0;
+	int noutputs = ann->layers[ann->n-1]->n;
+	double acc, sum, m, v;
+	int o, i, w, n;
+	Neuron *O, *I;
+	Weights *W, *D, *D2, *M, *V;
+	Adam *annI;
+
+	if (ann->internal == 0)
+		adaminit(ann);
+	annI = ann->internal;
+	annI->timestep++;
+
+	for (o = 0; o < noutputs; o++) {
+		// error = outputs[o] - result
+		error[o] -= outputs[o];
+		error[o] = -error[o];
+		ret += pow(error[o], 2.0) * 0.5;
+	}
+	// output-layer deltas get the raw error; earlier ones start at 1.0
+	D = ann->deltas[ann->n-2];
+	weightsinitdoubles(D, error);
+	for (i = 0; i < (ann->n-2); i++) {
+		D = ann->deltas[i];
+		weightsinitdouble(D, 1.0);
+	}
+
+	// backpropagate MSE
+	D2 = ann->deltas[ann->n-2];
+	for (w = ann->n-2; w >= 0; w--) {
+		D = ann->deltas[w];
+		M = annI->first[w];
+		V = annI->second[w];
+
+		for (o = 0; o < ann->layers[w+1]->n; o++) {
+			O = ann->layers[w+1]->neurons[o];
+			acc = O->gradient(O) * O->steepness;
+			sum = 1.0;
+			if (D2 != D) {
+				W = ann->weights[w+1];
+				sum = 0.0;
+				#pragma omp parallel for reduction (+:sum)
+				for (n = 0; n < D2->outputs; n++)
+					sum += D2->values[o][n] * W->values[o][n];
+			}
+			for (i = 0; i <= ann->layers[w]->n; i++) {
+				I = ann->layers[w]->neurons[i];
+				D->values[i][o] *= acc * sum;
+				// exponential moving averages of the gradient
+				// (M) and its square (V), per Adam
+				M->values[i][o] *= annI->beta1;
+				M->values[i][o] += (1.0 - annI->beta1) * D->values[i][o] * I->value;
+				V->values[i][o] *= annI->beta2;
+				V->values[i][o] += (1.0 - annI->beta2) * D->values[i][o] * D->values[i][o] * I->value * I->value;
+			}
+		}
+
+		D2 = D;
+	}
+
+	// update weights
+	for (w = 0; w < ann->n-1; w++) {
+		W = ann->weights[w];
+		M = annI->first[w];
+		V = annI->second[w];
+
+		for (i = 0; i <= W->inputs; i++) {
+			for (o = 0; o < W->outputs; o++) {
+				// bias correction, skipped once pow(beta, t)
+				// is negligibly small
+				m = M->values[i][o] / (annI->timestep < 100? (1.0 - pow(annI->beta1, annI->timestep)): 1.0);
+				v = V->values[i][o] / (annI->timestep < 10000? (1.0 - pow(annI->beta2, annI->timestep)): 1.0);
+				W->values[i][o] += (m / (sqrt(v) + annI->epsilon)) * annI->rate;
+			}
+		}
+	}
+
+	free(error);
+	return ret;
+}
+
+/*
+ * AdaMax variant of anntrain_adam(): V tracks the infinity norm of the
+ * gradient, max(beta2*V, |grad|), instead of a squared second moment,
+ * and the default rate is raised to 0.002.  Returns the summed half
+ * squared error.  NOTE(review): the update divides m/v with no epsilon,
+ * so a weight whose gradient has always been zero divides by zero —
+ * confirm inputs avoid this.
+ */
+double
+anntrain_adamax(Ann *ann, double *inputs, double *outputs)
+{
+	double *error = annrun(ann, inputs);
+	double ret = 0.0;
+	int noutputs = ann->layers[ann->n-1]->n;
+	double acc, sum, m, v;
+	int o, i, w, n;
+	Neuron *O, *I;
+	Weights *W, *D, *D2, *M, *V;
+	Adam *annI;
+
+	if (ann->internal == 0)
+		adaminit(ann);
+	annI = ann->internal;
+	annI->rate = 0.002;
+	annI->timestep++;
+
+	for (o = 0; o < noutputs; o++) {
+		// error = outputs[o] - result
+		error[o] -= outputs[o];
+		error[o] = -error[o];
+		ret += pow(error[o], 2.0) * 0.5;
+	}
+	D = ann->deltas[ann->n-2];
+	weightsinitdoubles(D, error);
+	for (i = 0; i < (ann->n-2); i++) {
+		D = ann->deltas[i];
+		weightsinitdouble(D, 1.0);
+	}
+
+	// backpropagate MSE
+	D2 = ann->deltas[ann->n-2];
+	for (w = ann->n-2; w >= 0; w--) {
+		D = ann->deltas[w];
+		M = annI->first[w];
+		V = annI->second[w];
+
+		for (o = 0; o < ann->layers[w+1]->n; o++) {
+			O = ann->layers[w+1]->neurons[o];
+			acc = O->gradient(O) * O->steepness;
+			sum = 1.0;
+			if (D2 != D) {
+				W = ann->weights[w+1];
+				sum = 0.0;
+				#pragma omp parallel for reduction (+:sum)
+				for (n = 0; n < D2->outputs; n++)
+					sum += D2->values[o][n] * W->values[o][n];
+			}
+			for (i = 0; i <= ann->layers[w]->n; i++) {
+				I = ann->layers[w]->neurons[i];
+				D->values[i][o] *= acc * sum;
+				M->values[i][o] *= annI->beta1;
+				M->values[i][o] += (1.0 - annI->beta1) * D->values[i][o] * I->value;
+				// infinity-norm second moment (AdaMax)
+				V->values[i][o] = fmax(V->values[i][o] * annI->beta2, fabs(D->values[i][o] * I->value));
+			}
+		}
+
+		D2 = D;
+	}
+
+	// update weights
+	for (w = 0; w < ann->n-1; w++) {
+		W = ann->weights[w];
+		M = annI->first[w];
+		V = annI->second[w];
+
+		for (i = 0; i <= W->inputs; i++) {
+			for (o = 0; o < W->outputs; o++) {
+				m = M->values[i][o];
+				v = V->values[i][o];
+				// first-moment bias correction only; dropped
+				// once pow(beta1, t) is negligibly small
+				W->values[i][o] += (annI->rate/(1.0 - (annI->timestep < 100? pow(annI->beta1, annI->timestep): 0.0))) * (m/v);
+			}
+		}
+	}
+
+	free(error);
+	return ret;
+}
+
+/*
+ * Write the topology (layer count, then each layer size) followed by
+ * every weight matrix — bias rows included — to filename as plain
+ * text, one number per line, in the order loadann() reads them back.
+ * Returns 0 on success, -1 if the file cannot be created.
+ */
+int
+saveann(char *filename, Ann *ann)
+{
+	Weights *W;
+	int i, j, k;
+	int fd = create(filename, OWRITE, 0644);
+	if (fd < 0) {
+		perror("create");
+		return -1;
+	}
+
+	fprint(fd, "%d\n", ann->n);
+	for (i = 0; i < ann->n; i++)
+		fprint(fd, "%d\n", ann->layers[i]->n);
+
+	for (i = 0; i < (ann->n - 1); i++) {
+		W = ann->weights[i];
+		for (j = 0; j <= W->inputs; j++)
+			for (k = 0; k < W->outputs; k++)
+				fprint(fd, "%f\n", W->values[j][k]);
+	}
+
+	close(fd);
+	return 0;
+}
+
+/* Buffer shared across readline() calls; freed and reset by loadann(). */
+char *retline = nil;
+
+/*
+ * Read one '\n'-terminated line from fd into the shared buffer and
+ * return it with the newline replaced by NUL; returns nil at EOF.
+ * The previously returned line is discarded on entry by shifting any
+ * leftover bytes down.  Exits the process on a read error.
+ * NOTE(review): the length/offset bookkeeping (length -= r on the
+ * shift, length += r after a read that fit in the existing buffer)
+ * looks fragile — verify against long lines before reusing elsewhere.
+ */
+char*
+readline(int fd)
+{
+	static int length;
+	static int offset;
+	char *epos;
+	int r;
+
+	if (retline == nil) {
+		length = 0x1000;
+		offset = 0;
+		retline = malloc(length);
+		retline[offset] = '\0';
+	}
+
+	// drop the line handed out last time (it ends at the first NUL)
+	r = strlen(retline);
+	if (r > 0) {
+		r++;
+		memmove(retline, &retline[r], length - r);
+		length -= r;
+		offset = strlen(retline);
+	}
+
+	if (length < 0x1000) {
+		retline = realloc(retline, length + 0x1000);
+		length += 0x1000;
+	}
+
+	do {
+		epos = strchr(retline, '\n');
+		if (epos != nil) {
+			*epos = '\0';
+			return retline;
+		}
+
+		r = read(fd, &retline[offset], length - offset - 1);
+		if (r < 0) {
+			perror("read");
+			exits("read");
+		}
+		if (r > 0) {
+			offset += r;
+			retline[offset] = '\0';
+			length += r;
+			retline = realloc(retline, length);
+		}
+	} while(r != 0);
+
+	return nil;
+}
+
+/*
+ * Load a network saved by saveann().  Returns nil (with a diagnostic on
+ * fd 2) if the file cannot be opened or the header is malformed.
+ * Fixes over the original: the file descriptor is now closed on every
+ * path (it was leaked on both the error path and on success), the
+ * temporary layers array is freed, and a truncated file no longer
+ * dereferences the nil returned by readline() at EOF.
+ */
+Ann*
+loadann(char *filename)
+{
+	Weights *W;
+	Ann *ann = nil;
+	char *buf;
+	int i, j, k;
+	int num_layers, *layers;
+	int fd = open(filename, OREAD);
+	if (fd < 0) {
+		perror("open");
+		return ann;
+	}
+
+	buf = readline(fd);
+	num_layers = buf == nil? 0: atoi(buf);
+
+	if (num_layers < 2) {
+		fprint(2, "num_layers < 2\n");
+		close(fd);
+		return ann;
+	}
+
+	layers = calloc(num_layers, sizeof(int));
+	for (i = 0; i < num_layers; i++) {
+		buf = readline(fd);
+		layers[i] = buf == nil? 0: atoi(buf);
+	}
+
+	ann = anncreatev(num_layers, layers);
+	for (i = 0; i < (ann->n - 1); i++) {
+		W = ann->weights[i];
+		for (j = 0; j <= W->inputs; j++)
+			for (k = 0; k < W->outputs; k++) {
+				buf = readline(fd);
+				// truncated file: leave the remaining weights
+				// at their random initial values
+				W->values[j][k] = buf == nil? W->values[j][k]: atof(buf);
+			}
+	}
+
+	close(fd);
+	free(layers);
+	free(retline);
+	retline = nil;
+
+	return ann;
+}
+
+/* Print a usage summary on fd 2 and exit with status "usage". */
+void
+usage(char **argv)
+{
+	fprint(2, "usage: %s [-train] filename [num_layers num_input_layer ... num_output_layer]\n", argv[0]);
+	exits("usage");
+}
+
+/*
+ * ann [-train] filename [num_layers size0 ... sizeN]
+ *
+ * Loads the network from filename if it exists (optionally checking it
+ * against a topology given on the command line), or creates one from
+ * that topology.  Then reads whitespace-separated numbers from stdin:
+ * in run mode every ninput values produce one prediction printed on
+ * fd 2; in train mode input lines and expected-output lines alternate.
+ * On EOF the network is written back to filename.
+ */
+void
+main(int argc, char **argv)
+{
+	Ann *ann;
+	char *filename;
+	int train;
+	Dir *dir;
+	int num_layers = 0;
+	int *layers = nil;
+	int i;
+	char *line;
+	double *input;
+	double *output = nil;
+	int ninput;
+	int noutput;
+	int offset;
+	double f;
+	int trainline;	/* 1 while the next stdin line is an expected-output line */
+	int nline;	/* how many values the current line must supply */
+
+	train = 0;
+
+	if (argc < 2)
+		usage(argv);
+
+	filename = argv[1];
+
+	// any flag starting with "-t" selects training mode; the
+	// network file then follows the flag
+	if (argv[1][0] == '-' && argv[1][1] == 't') {
+		if (argc < 3)
+			usage(argv);
+
+		train = 1;
+		filename = argv[2];
+	}
+
+	// if the file already exists, load the saved network
+	ann = nil;
+	dir = dirstat(filename);
+	if (dir != nil) {
+		free(dir);
+		ann = loadann(filename);
+		if (ann == nil)
+			exits("loadann");
+	}
+
+	// optional topology on the command line
+	if (argc >= (train + 3)) {
+		num_layers = atoi(argv[train + 2]);
+
+		if (num_layers < 2 || argc != (train + 3 + num_layers))
+			usage(argv);
+
+		layers = calloc(num_layers, sizeof(int));
+
+		for (i = 0; i < num_layers; i++)
+			layers[i] = atoi(argv[train + 3 + i]);
+	}
+
+	if (num_layers > 0) {
+		if (ann != nil) {
+			// a loaded network must match the requested topology
+			if (ann->n != num_layers) {
+				fprint(2, "num_layers: %d != %d\n", ann->n, num_layers);
+				exits("num_layers");
+			}
+
+			for (i = 0; i < num_layers; i++) {
+				if (layers[i] != ann->layers[i]->n) {
+					fprint(2, "num_layer_%d: %d != %d\n", i, layers[i], ann->layers[i]->n);
+					exits("num_layer");
+				}
+			}
+		} else {
+			ann = anncreatev(num_layers, layers);
+			if (ann == nil)
+				exits("anncreatev");
+		}
+	}
+
+	if (ann == nil) {
+		fprint(2, "file not found: %s\n", filename);
+		exits("file not found");
+	}
+
+	ninput = ann->layers[0]->n;
+	noutput = ann->layers[ann->n - 1]->n;
+	input = calloc(ninput, sizeof(double));
+	if (train == 1)
+		output = calloc(noutput, sizeof(double));
+
+	trainline = 0;
+	nline = ninput;
+
+	do {
+		int i = 0;	/* shadows the outer i: values parsed so far */
+		while ((line = readline(0)) != nil) {
+			// pull nline numbers off this line
+			do {
+				if (strlen(line) == 0)
+					break;
+				while(isspace(*line))
+					line++;
+				if (strlen(line) == 0)
+					break;
+				offset = 0;
+				while (isdigit(line[offset]) || line[offset] == '.' || line[offset] == '-')
+					offset++;
+				if (!isspace(line[offset]) && line[offset] != '\0') {
+					fprint(2, "input error: %s\n", line);
+					exits("input");
+				}
+				f = atof(line);
+				if (trainline == 0) {
+					input[i] = f;
+					i++;
+				} else {
+					output[i] = f;
+					i++;
+				}
+				line = &line[offset];
+				while(isspace(*line))
+					line++;
+			} while(i < nline && strlen(line) > 0);
+
+			if (i == nline) {
+				if (train == 0) {
+					// run mode: predict and print on fd 2
+					output = annrun(ann, input);
+					for (i = 0; i < noutput; i++)
+						fprint(2, "%f ", output[i]);
+					fprint(2, "\n");
+					free(output);
+				} else {
+					// train mode: alternate input and
+					// expected-output lines
+					if (trainline == 0) {
+						trainline = 1;
+						nline = noutput;
+					} else {
+						anntrain(ann, input, output);
+						trainline = 0;
+						nline = ninput;
+					}
+				}
+				i = 0;
+			}
+		}
+	} while(line != nil);
+
+	if (saveann(filename, ann) != 0)
+		exits("saveann");
+
+	exits(nil);
+}