XCSF 1.4.7
XCSF learning classifier system
neural_layer.c
/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file neural_layer.c
 * @brief Functions operating on neural network layers.
 */

#include "neural_activations.h"
#include "neural_layer_avgpool.h"
#include "neural_layer_connected.h"
#include "neural_layer_convolutional.h"
#include "neural_layer_dropout.h"
#include "neural_layer_lstm.h"
#include "neural_layer_maxpool.h"
#include "neural_layer_noise.h"
#include "neural_layer_recurrent.h"
#include "neural_layer_softmax.h"
#include "neural_layer_upsample.h"
#include "utils.h"
/**
 * @brief Sets a neural network layer's functions to the implementations.
 * @param [in] l The neural network layer to set.
 */
void
layer_set_vptr(struct Layer *l)
{
    switch (l->type) {
        case CONNECTED:
            l->layer_vptr = &layer_connected_vtbl;
            break;
        case DROPOUT:
            l->layer_vptr = &layer_dropout_vtbl;
            break;
        case NOISE:
            l->layer_vptr = &layer_noise_vtbl;
            break;
        case RECURRENT:
            l->layer_vptr = &layer_recurrent_vtbl;
            break;
        case LSTM:
            l->layer_vptr = &layer_lstm_vtbl;
            break;
        case SOFTMAX:
            l->layer_vptr = &layer_softmax_vtbl;
            break;
        case MAXPOOL:
            l->layer_vptr = &layer_maxpool_vtbl;
            break;
        case CONVOLUTIONAL:
            l->layer_vptr = &layer_convolutional_vtbl;
            break;
        case AVGPOOL:
            l->layer_vptr = &layer_avgpool_vtbl;
            break;
        case UPSAMPLE:
            l->layer_vptr = &layer_upsample_vtbl;
            break;
        default:
            printf("Error setting layer vptr for type: %d\n", l->type);
            exit(EXIT_FAILURE);
    }
}

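/*
 * Illustrative sketch (not part of the original source): a layer's type must
 * be set before layer_set_vptr() is called so that subsequent operations
 * dispatch through the correct implementation table. For example:
 *
 *     struct Layer l;
 *     layer_defaults(&l);
 *     l.type = CONNECTED;
 *     layer_set_vptr(&l); // l.layer_vptr now points to layer_connected_vtbl
 */
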
/**
 * @brief Mutates the gradient descent rate of a neural layer.
 * @param [in] l The neural network layer to mutate.
 * @param [in] mu The rate of mutation.
 * @return Whether any alterations were made.
 */
bool
layer_mutate_eta(struct Layer *l, const double mu)
{
    const double orig = l->eta;
    l->eta += rand_normal(0, mu);
    l->eta = clamp(l->eta, l->eta_min, l->eta_max);
    if (l->eta != orig) {
        return true;
    }
    return false;
}

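/*
 * Usage sketch (hypothetical mutation rate; not part of the original source):
 * callers typically check LAYER_EVOLVE_ETA before mutating the rate, and the
 * result always remains clamped within [eta_min, eta_max]:
 *
 *     if (l->options & LAYER_EVOLVE_ETA) {
 *         const bool changed = layer_mutate_eta(l, 0.1);
 *     }
 */
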
/**
 * @brief Returns the number of neurons to add or remove from a layer.
 * @param [in] l The neural network layer to mutate.
 * @param [in] mu The rate of mutation.
 * @return The number of neurons to add or remove.
 */
int
layer_mutate_neurons(const struct Layer *l, const double mu)
{
    int n = 0;
    if (rand_uniform(0, 0.1) < mu) { // 10x higher probability
        while (n == 0) {
            const double m = clamp(rand_normal(0, 0.5), -1, 1);
            n = (int) round(m * l->max_neuron_grow);
        }
        if (l->n_outputs + n < 1) {
            n = -(l->n_outputs - 1);
        } else if (l->n_outputs + n > l->max_outputs) {
            n = l->max_outputs - l->n_outputs;
        }
    }
    return n;
}

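/*
 * Usage sketch (not part of the original source): a caller samples a neuron
 * delta and applies it with layer_add_neurons(). A non-zero delta is returned
 * with probability of roughly 10 * mu, bounded so that the layer retains
 * between 1 and max_outputs neurons:
 *
 *     const int n = layer_mutate_neurons(l, mu);
 *     if (n != 0) {
 *         layer_add_neurons(l, n); // negative n removes neurons
 *     }
 */
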
/**
 * @brief Adds N neurons to a layer. Negative N removes neurons.
 * @param [in] l The neural network layer to mutate.
 * @param [in] N The number of neurons to add or remove.
 */
void
layer_add_neurons(struct Layer *l, const int N)
{
    const int old_n_outputs = l->n_outputs;
    const int old_n_weights = l->n_weights;
    l->n_outputs += N;
    l->n_biases = l->n_outputs;
    l->n_weights = l->n_outputs * l->n_inputs;
    layer_guard_outputs(l);
    layer_guard_weights(l);
    l->weights = realloc(l->weights, sizeof(double) * l->n_weights);
    l->weight_active = realloc(l->weight_active, sizeof(bool) * l->n_weights);
    l->weight_updates =
        realloc(l->weight_updates, sizeof(double) * l->n_weights);
    l->state = realloc(l->state, sizeof(double) * l->n_outputs);
    l->output = realloc(l->output, sizeof(double) * l->n_outputs);
    l->biases = realloc(l->biases, sizeof(double) * l->n_biases);
    l->bias_updates = realloc(l->bias_updates, sizeof(double) * l->n_biases);
    l->delta = realloc(l->delta, sizeof(double) * l->n_outputs);
    for (int i = old_n_weights; i < l->n_weights; ++i) {
        if (l->options & LAYER_EVOLVE_CONNECT && rand_uniform(0, 1) < 0.5) {
            l->weights[i] = 0;
            l->weight_active[i] = false;
        } else {
            l->weights[i] = rand_normal(0, WEIGHT_SD);
            l->weight_active[i] = true;
        }
        l->weight_updates[i] = 0;
    }
    for (int i = old_n_outputs; i < l->n_outputs; ++i) {
        l->biases[i] = 0;
        l->bias_updates[i] = 0;
        l->output[i] = 0;
        l->state[i] = 0;
        l->delta[i] = 0;
    }
    layer_calc_n_active(l);
}

/**
 * @brief Mutates a layer's connectivity by randomly enabling and disabling
 * (zeroing) weights.
 * @param [in] l The neural network layer to mutate.
 * @param [in] mu_enable The probability of enabling a disabled weight.
 * @param [in] mu_disable The probability of disabling an enabled weight.
 * @return Whether any alterations were made.
 */
bool
layer_mutate_connectivity(struct Layer *l, const double mu_enable,
                          const double mu_disable)
{
    bool mod = false;
    if (l->n_inputs > 1 && l->n_outputs > 1) {
        for (int i = 0; i < l->n_weights; ++i) {
            if (!l->weight_active[i] && rand_uniform(0, 1) < mu_enable) {
                l->weight_active[i] = true;
                l->weights[i] = rand_normal(0, WEIGHT_SD);
                ++(l->n_active);
                mod = true;
            } else if (l->weight_active[i] && rand_uniform(0, 1) < mu_disable) {
                l->weight_active[i] = false;
                l->weights[i] = 0;
                --(l->n_active);
                mod = true;
            }
        }
    }
    return mod;
}

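/*
 * Usage sketch (not part of the original source): connectivity mutation may
 * leave a neuron or input without any active weights, so a caller can repair
 * the topology afterwards:
 *
 *     if (layer_mutate_connectivity(l, mu_enable, mu_disable)) {
 *         layer_ensure_input_represention(l);
 *     }
 */
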
/**
 * @brief Ensures that each neuron is connected to at least one input and
 * each input is connected to at least one neuron.
 * @param [in] l The neural network layer to repair.
 */
void
layer_ensure_input_represention(struct Layer *l)
{
    // each neuron must be connected to at least one input
    for (int i = 0; i < l->n_outputs; ++i) {
        int active = 0;
        const int offset = l->n_inputs * i;
        for (int j = 0; j < l->n_inputs; ++j) {
            if (l->weight_active[offset + j]) {
                ++active;
            }
        }
        if (active < 1) {
            const int r = rand_uniform_int(0, l->n_inputs);
            l->weights[offset + r] = rand_normal(0, WEIGHT_SD);
            l->weight_active[offset + r] = true;
            ++(l->n_active);
            ++active;
        }
    }
    // each input must be represented at least once
    for (int i = 0; i < l->n_inputs; ++i) {
        int active = 0;
        for (int j = 0; j < l->n_outputs; ++j) {
            if (l->weight_active[l->n_inputs * j + i]) {
                ++active;
            }
        }
        while (active < 1) {
            const int offset = l->n_inputs * rand_uniform_int(0, l->n_outputs);
            if (!l->weight_active[offset + i]) {
                l->weights[offset + i] = rand_normal(0, WEIGHT_SD);
                l->weight_active[offset + i] = true;
                ++(l->n_active);
                ++active;
            }
        }
    }
}

/**
 * @brief Mutates a layer's weights and biases by adding random numbers from
 * a Gaussian normal distribution with zero mean and standard deviation equal
 * to the mutation rate.
 * @param [in] l The neural network layer to mutate.
 * @param [in] mu The rate of mutation.
 * @return Whether any alterations were made.
 */
bool
layer_mutate_weights(struct Layer *l, const double mu)
{
    bool mod = false;
    for (int i = 0; i < l->n_weights; ++i) {
        if (l->weight_active[i]) {
            const double orig = l->weights[i];
            l->weights[i] += rand_normal(0, mu);
            l->weights[i] = clamp(l->weights[i], WEIGHT_MIN, WEIGHT_MAX);
            if (l->weights[i] != orig) {
                mod = true;
            }
        }
    }
    for (int i = 0; i < l->n_biases; ++i) {
        const double orig = l->biases[i];
        l->biases[i] += rand_normal(0, mu);
        l->biases[i] = clamp(l->biases[i], WEIGHT_MIN, WEIGHT_MAX);
        if (l->biases[i] != orig) {
            mod = true;
        }
    }
    return mod;
}

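/*
 * Usage sketch (hypothetical rate; not part of the original source): the
 * mutation rate doubles as the standard deviation of the Gaussian noise, and
 * results are clamped to [WEIGHT_MIN, WEIGHT_MAX]:
 *
 *     if (layer_mutate_weights(l, 0.01)) {
 *         // at least one active weight or bias was altered
 *     }
 */
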
/**
 * @brief Mutates a layer's activation function by random selection.
 * @param [in] l The neural network layer to mutate.
 * @param [in] mu The rate of mutation.
 * @return Whether any alterations were made.
 */
bool
layer_mutate_functions(struct Layer *l, const double mu)
{
    bool mod = false;
    if (rand_uniform(0, 1) < mu) {
        const int orig = l->function;
        l->function = rand_uniform_int(0, NUM_ACTIVATIONS);
        if (l->function != orig) {
            mod = true;
        }
    }
    if (l->type == LSTM && rand_uniform(0, 1) < mu) {
        const int orig = l->recurrent_function;
        l->recurrent_function = rand_uniform_int(0, NUM_ACTIVATIONS);
        if (l->recurrent_function != orig) {
            mod = true;
        }
    }
    return mod;
}

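/*
 * Illustrative sketch (hypothetical rates; not part of the original source):
 * a full mutation pass might chain the operators above, recording whether
 * anything changed. Each call runs unconditionally because the OR places the
 * function call first:
 *
 *     bool mod = false;
 *     mod = layer_mutate_eta(l, 0.1) || mod;
 *     mod = layer_mutate_weights(l, 0.01) || mod;
 *     mod = layer_mutate_functions(l, 0.05) || mod;
 */
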
/**
 * @brief Prints a layer's weights and biases.
 * @param [in] l The neural network layer to print.
 * @param [in] print_weights Whether to print the weight and bias values.
 */
void
layer_weight_print(const struct Layer *l, const bool print_weights)
{
    char *json_str = layer_weight_json(l, print_weights);
    printf("%s\n", json_str);
    free(json_str);
}

/**
 * @brief Returns a json formatted string representation of a layer's weights.
 * @param [in] l The neural network layer to represent.
 * @param [in] return_weights Whether to include the weight and bias values.
 * @return A json formatted string; the caller is responsible for freeing it.
 */
char *
layer_weight_json(const struct Layer *l, const bool return_weights)
{
    cJSON *json = cJSON_CreateObject();
    cJSON_AddNumberToObject(json, "n_weights", l->n_weights);
    if (return_weights) {
        cJSON *weights = cJSON_CreateDoubleArray(l->weights, l->n_weights);
        cJSON_AddItemToObject(json, "weights", weights);
    }
    cJSON_AddNumberToObject(json, "n_biases", l->n_biases);
    if (return_weights) {
        cJSON *biases = cJSON_CreateDoubleArray(l->biases, l->n_biases);
        cJSON_AddItemToObject(json, "biases", biases);
    }
    cJSON_AddNumberToObject(json, "n_active", l->n_active);
    char *string = cJSON_Print(json);
    cJSON_Delete(json);
    return string;
}

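/*
 * Usage sketch (not part of the original source): the returned string is
 * allocated by cJSON_Print() and must be freed by the caller, exactly as
 * layer_weight_print() does above:
 *
 *     char *json = layer_weight_json(l, true);
 *     // ... log or transmit json ...
 *     free(json);
 */
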
/**
 * @brief Randomises a layer's weights and biases.
 * @param [in] l The neural network layer to randomise.
 */
void
layer_weight_rand(struct Layer *l)
{
    l->n_active = l->n_weights;
    for (int i = 0; i < l->n_weights; ++i) {
        l->weights[i] = rand_normal(0, WEIGHT_SD_RAND);
        l->weight_active[i] = true;
    }
    for (int i = 0; i < l->n_biases; ++i) {
        l->biases[i] = rand_normal(0, WEIGHT_SD_RAND);
    }
}

/**
 * @brief Clamps a layer's weights and biases within the range
 * [WEIGHT_MIN, WEIGHT_MAX] and zeroes inactive weights.
 * @param [in] l The neural network layer to clamp.
 */
void
layer_weight_clamp(const struct Layer *l)
{
    for (int i = 0; i < l->n_weights; ++i) {
        if (l->weight_active[i]) {
            l->weights[i] = clamp(l->weights[i], WEIGHT_MIN, WEIGHT_MAX);
        } else {
            l->weights[i] = 0;
        }
    }
    for (int i = 0; i < l->n_biases; ++i) {
        l->biases[i] = clamp(l->biases[i], WEIGHT_MIN, WEIGHT_MAX);
    }
}

/**
 * @brief Recalculates the number of active connections within a layer.
 * @param [in] l The neural network layer to recalculate.
 */
void
layer_calc_n_active(struct Layer *l)
{
    l->n_active = 0;
    for (int i = 0; i < l->n_weights; ++i) {
        if (l->weight_active[i]) {
            ++(l->n_active);
        }
    }
}

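/*
 * Illustrative sketch (not part of the original source): a typical
 * (re)initialisation flow randomises the weights, clamps them into range,
 * and refreshes the active connection count:
 *
 *     layer_weight_rand(l);   // sets n_active = n_weights
 *     layer_weight_clamp(l);  // bounds values in [WEIGHT_MIN, WEIGHT_MAX]
 *     layer_calc_n_active(l); // recount after any connectivity changes
 */
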
/**
 * @brief Initialises a layer's gradient descent rate.
 * @param [in] l The neural network layer to initialise.
 */
void
layer_init_eta(struct Layer *l)
{
    if (l->options & LAYER_EVOLVE_ETA) {
        l->eta = rand_uniform(l->eta_min, l->eta_max);
    } else {
        l->eta = l->eta_max;
    }
}

/**
 * @brief Initialises a layer to default values.
 * @param [in] l The neural network layer to initialise.
 */
void
layer_defaults(struct Layer *l)
{
    l->type = 0;
    l->state = NULL;
    l->output = NULL;
    l->options = 0;
    l->weights = NULL;
    l->weight_active = NULL;
    l->biases = NULL;
    l->bias_updates = NULL;
    l->weight_updates = NULL;
    l->delta = NULL;
    l->mu = NULL;
    l->eta = 0;
    l->eta_max = 0;
    l->eta_min = 0;
    l->momentum = 0;
    l->decay = 0;
    l->n_inputs = 0;
    l->n_outputs = 0;
    l->max_outputs = 0;
    l->max_neuron_grow = 0;
    l->n_weights = 0;
    l->n_biases = 0;
    l->n_active = 0;
    l->function = 0;
    l->scale = 0;
    l->probability = 0;
    l->layer_vptr = NULL;
    l->prev_state = NULL;
    l->input_layer = NULL;
    l->self_layer = NULL;
    l->output_layer = NULL;
    l->recurrent_function = 0;
    l->uf = NULL;
    l->ui = NULL;
    l->ug = NULL;
    l->uo = NULL;
    l->wf = NULL;
    l->wi = NULL;
    l->wg = NULL;
    l->wo = NULL;
    l->cell = NULL;
    l->prev_cell = NULL;
    l->f = NULL;
    l->i = NULL;
    l->g = NULL;
    l->o = NULL;
    l->c = NULL;
    l->h = NULL;
    l->temp = NULL;
    l->temp2 = NULL;
    l->temp3 = NULL;
    l->dc = NULL;
    l->height = 0;
    l->width = 0;
    l->channels = 0;
    l->pad = 0;
    l->out_w = 0;
    l->out_h = 0;
    l->out_c = 0;
    l->size = 0;
    l->stride = 0;
    l->indexes = NULL;
    l->n_filters = 0;
}

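/*
 * Usage sketch (hypothetical field values; not part of the original source):
 * layer constructors are expected to call layer_defaults() first and then
 * overwrite only the fields they need before setting the vtable:
 *
 *     struct Layer l;
 *     layer_defaults(&l);
 *     l.type = CONNECTED;
 *     l.n_inputs = 4;
 *     l.max_outputs = 10;
 *     layer_set_vptr(&l);
 */
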
/**
 * @brief Returns a string representation of a layer type from an integer.
 * @param [in] type Integer representation of a layer type.
 * @return String representing the layer type.
 */
const char *
layer_type_as_string(const int type)
{
    switch (type) {
        case CONNECTED:
            return STRING_CONNECTED;
        case DROPOUT:
            return STRING_DROPOUT;
        case NOISE:
            return STRING_NOISE;
        case SOFTMAX:
            return STRING_SOFTMAX;
        case RECURRENT:
            return STRING_RECURRENT;
        case LSTM:
            return STRING_LSTM;
        case MAXPOOL:
            return STRING_MAXPOOL;
        case CONVOLUTIONAL:
            return STRING_CONVOLUTIONAL;
        case AVGPOOL:
            return STRING_AVGPOOL;
        case UPSAMPLE:
            return STRING_UPSAMPLE;
        default:
            printf("layer_type_as_string(): invalid type: %d\n", type);
            exit(EXIT_FAILURE);
    }
}

/**
 * @brief Returns whether a layer type expects images as input.
 * @param [in] type Integer representation of a layer type.
 * @return Whether the layer type operates on images.
 */
bool
layer_receives_images(const int type)
{
    switch (type) {
        case AVGPOOL:
        case MAXPOOL:
        case UPSAMPLE:
        case CONVOLUTIONAL:
            return true;
        default:
            return false;
    }
}

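/*
 * Usage sketch (not part of the original source): a network builder might
 * branch on this predicate, since image-based layers also require the
 * height, width, and channels fields to be set:
 *
 *     if (layer_receives_images(l->type)) {
 *         // set l->height, l->width, and l->channels before use
 *     }
 */
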
/**
 * @brief Returns the integer representation of a layer type given a name.
 * @param [in] type String representation of a layer type.
 * @return Integer representing the layer type.
 */
int
layer_type_as_int(const char *type)
{
    if (strncmp(type, STRING_CONNECTED, 10) == 0) {
        return CONNECTED;
    }
    if (strncmp(type, STRING_DROPOUT, 8) == 0) {
        return DROPOUT;
    }
    if (strncmp(type, STRING_SOFTMAX, 8) == 0) {
        return SOFTMAX;
    }
    if (strncmp(type, STRING_NOISE, 6) == 0) {
        return NOISE;
    }
    if (strncmp(type, STRING_RECURRENT, 9) == 0) {
        return RECURRENT;
    }
    if (strncmp(type, STRING_LSTM, 5) == 0) {
        return LSTM;
    }
    if (strncmp(type, STRING_MAXPOOL, 8) == 0) {
        return MAXPOOL;
    }
    if (strncmp(type, STRING_CONVOLUTIONAL, 14) == 0) {
        return CONVOLUTIONAL;
    }
    if (strncmp(type, STRING_AVGPOOL, 8) == 0) {
        return AVGPOOL;
    }
    if (strncmp(type, STRING_UPSAMPLE, 9) == 0) {
        return UPSAMPLE;
    }
    printf("layer_type_as_int(): invalid type: %s\n", type);
    exit(EXIT_FAILURE);
}

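/*
 * Round-trip sketch (not part of the original source): the string and
 * integer representations are mutually inverse for valid layer types:
 *
 *     const int t = layer_type_as_int(STRING_CONNECTED); // CONNECTED
 *     const char *s = layer_type_as_string(CONNECTED);   // STRING_CONNECTED
 */
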
/**
 * @brief Checks that the number of biases is within bounds.
 * @param [in] l The layer to check.
 */
void
layer_guard_biases(const struct Layer *l)
{
    if (l->n_biases < 1 || l->n_biases > N_OUTPUTS_MAX) {
        printf("layer_guard_biases() invalid size\n");
        layer_print(l, false);
        exit(EXIT_FAILURE);
    }
}

/**
 * @brief Checks that the number of outputs is within bounds.
 * @param [in] l The layer to check.
 */
void
layer_guard_outputs(const struct Layer *l)
{
    if (l->n_outputs < 1 || l->n_outputs > N_OUTPUTS_MAX) {
        printf("layer_guard_outputs() invalid size\n");
        layer_print(l, false);
        exit(EXIT_FAILURE);
    }
}

/**
 * @brief Checks that the number of weights is within bounds.
 * @param [in] l The layer to check.
 */
void
layer_guard_weights(const struct Layer *l)
{
    if (l->n_weights < 1 || l->n_weights > N_WEIGHTS_MAX) {
        printf("layer_guard_weights() invalid size\n");
        layer_print(l, false);
        exit(EXIT_FAILURE);
    }
}
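
/*
 * Usage sketch (not part of the original source): the guard functions are
 * defensive checks to run after any operation that resizes a layer's arrays:
 *
 *     l->n_outputs = new_size; // hypothetical resize
 *     layer_guard_outputs(l);
 *     layer_guard_biases(l);
 *     layer_guard_weights(l);
 */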