XCSF 1.4.8
XCSF learning classifier system
Loading...
Searching...
No Matches
neural_layer_dropout.c
Go to the documentation of this file.
1/*
2 * This program is free software: you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation, either version 3 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program. If not, see <http://www.gnu.org/licenses/>.
14 */
15
25#include "neural_activations.h"
26#include "utils.h"
27
32static void
34{
36 l->output = calloc(l->n_outputs, sizeof(double));
37 l->delta = calloc(l->n_outputs, sizeof(double));
38 l->state = calloc(l->n_outputs, sizeof(double));
39}
40
45static void
46free_layer_arrays(const struct Layer *l)
47{
48 free(l->output);
49 free(l->delta);
50 free(l->state);
51}
52
58void
59neural_layer_dropout_init(struct Layer *l, const struct ArgsLayer *args)
60{
61 l->n_inputs = args->n_inputs;
62 l->n_outputs = args->n_inputs;
63 l->max_outputs = args->n_inputs;
64 l->probability = args->probability;
65 l->scale = 1. / (1. - l->probability);
66 l->out_c = args->channels;
67 l->out_w = args->width;
68 l->out_h = args->height;
70}
71
/**
 * @brief Free memory used by a dropout layer.
 * @param [in] l The layer to be freed.
 */
void
neural_layer_dropout_free(const struct Layer *l)
{
    // restored (dropped by extraction): release output/delta/state arrays
    free_layer_arrays(l);
}
81
87struct Layer *
89{
90 if (src->type != DROPOUT) {
91 printf("neural_layer_dropout_copy(): incorrect source layer type\n");
92 exit(EXIT_FAILURE);
93 }
94 struct Layer *l = malloc(sizeof(struct Layer));
96 l->type = src->type;
97 l->layer_vptr = src->layer_vptr;
98 l->n_inputs = src->n_inputs;
99 l->n_outputs = src->n_inputs;
100 l->max_outputs = src->max_outputs;
101 l->probability = src->probability;
102 l->scale = src->scale;
103 l->out_c = src->out_c;
104 l->out_h = src->out_h;
105 l->out_w = src->out_w;
107 return l;
108}
109
/**
 * @brief Dummy function since dropout layers have no weights to randomise.
 * @param [in] l A dropout layer.
 */
void
neural_layer_dropout_rand(struct Layer *l)
{
    (void) l;
}
119
126void
127neural_layer_dropout_forward(const struct Layer *l, const struct Net *net,
128 const double *input)
129{
130 if (!net->train) {
131 memcpy(l->output, input, sizeof(double) * l->n_inputs);
132 } else {
133 for (int i = 0; i < l->n_inputs; ++i) {
134 l->state[i] = rand_uniform(0, 1);
135 if (l->state[i] < l->probability) {
136 l->output[i] = 0;
137 } else {
138 l->output[i] = input[i] * l->scale;
139 }
140 }
141 }
142}
143
151void
152neural_layer_dropout_backward(const struct Layer *l, const struct Net *net,
153 const double *input, double *delta)
154{
155 (void) net;
156 (void) input;
157 if (delta) {
158 for (int i = 0; i < l->n_inputs; ++i) {
159 if (l->state[i] < l->probability) {
160 delta[i] = 0;
161 } else {
162 delta[i] += l->delta[i] * l->scale;
163 }
164 }
165 }
166}
167
/**
 * @brief Dummy function since a dropout layer has no weights to update.
 * @param [in] l A dropout layer.
 */
void
neural_layer_dropout_update(const struct Layer *l)
{
    (void) l;
}
177
/**
 * @brief Dummy function since a dropout layer cannot be mutated.
 * @param [in] l A dropout layer.
 * @return False: no mutation ever occurs.
 */
bool
neural_layer_dropout_mutate(struct Layer *l)
{
    (void) l;
    return false;
}
189
195void
196neural_layer_dropout_resize(struct Layer *l, const struct Layer *prev)
197{
198 l->n_inputs = prev->n_outputs;
199 l->n_outputs = prev->n_outputs;
200 l->max_outputs = prev->n_outputs;
201 l->out_w = prev->out_w;
202 l->out_h = prev->out_h;
203 l->out_c = prev->out_c;
206}
207
213double *
215{
216 return l->output;
217}
218
/**
 * @brief Prints a dropout layer to stdout as JSON.
 * @param [in] l The layer to print.
 * @param [in] print_weights Whether to include weights (unused by dropout).
 */
void
neural_layer_dropout_print(const struct Layer *l, const bool print_weights)
{
    // delegate formatting to the JSON exporter; caller of export owns string
    char *json = neural_layer_dropout_json_export(l, print_weights);
    printf("%s\n", json);
    free(json);
}
231
239char *
241 const bool return_weights)
242{
243 (void) return_weights;
244 cJSON *json = cJSON_CreateObject();
245 cJSON_AddStringToObject(json, "type", "dropout");
246 cJSON_AddNumberToObject(json, "n_inputs", l->n_inputs);
247 cJSON_AddNumberToObject(json, "n_outputs", l->n_outputs);
248 cJSON_AddNumberToObject(json, "probability", l->probability);
249 char *string = cJSON_Print(json);
250 cJSON_Delete(json);
251 return string;
252}
253
260size_t
261neural_layer_dropout_save(const struct Layer *l, FILE *fp)
262{
263 size_t s = 0;
264 s += fwrite(&l->n_inputs, sizeof(int), 1, fp);
265 s += fwrite(&l->n_outputs, sizeof(int), 1, fp);
266 s += fwrite(&l->max_outputs, sizeof(int), 1, fp);
267 s += fwrite(&l->probability, sizeof(double), 1, fp);
268 s += fwrite(&l->scale, sizeof(double), 1, fp);
269 s += fwrite(&l->out_w, sizeof(int), 1, fp);
270 s += fwrite(&l->out_h, sizeof(int), 1, fp);
271 s += fwrite(&l->out_c, sizeof(int), 1, fp);
272 return s;
273}
274
281size_t
283{
284 size_t s = 0;
285 s += fread(&l->n_inputs, sizeof(int), 1, fp);
286 s += fread(&l->n_outputs, sizeof(int), 1, fp);
287 s += fread(&l->max_outputs, sizeof(int), 1, fp);
288 s += fread(&l->probability, sizeof(double), 1, fp);
289 s += fread(&l->scale, sizeof(double), 1, fp);
290 s += fread(&l->out_w, sizeof(int), 1, fp);
291 s += fread(&l->out_h, sizeof(int), 1, fp);
292 s += fread(&l->out_c, sizeof(int), 1, fp);
294 return s;
295}
Neural network activation functions.
void layer_defaults(struct Layer *l)
Initialises a layer to default values.
void layer_guard_outputs(const struct Layer *l)
Check number of outputs is within bounds.
#define DROPOUT
Layer type dropout.
double * neural_layer_dropout_output(const struct Layer *l)
Returns the output from a dropout layer.
struct Layer * neural_layer_dropout_copy(const struct Layer *src)
Initialises and creates a copy of one dropout layer from another.
char * neural_layer_dropout_json_export(const struct Layer *l, const bool return_weights)
Returns a json formatted string representation of a dropout layer.
void neural_layer_dropout_resize(struct Layer *l, const struct Layer *prev)
Resizes a dropout layer if the previous layer has changed size.
void neural_layer_dropout_forward(const struct Layer *l, const struct Net *net, const double *input)
Forward propagates a dropout layer.
void neural_layer_dropout_rand(struct Layer *l)
Dummy function since dropout layers have no weights.
static void free_layer_arrays(const struct Layer *l)
Free memory used by a dropout layer.
static void malloc_layer_arrays(struct Layer *l)
Allocate memory used by a dropout layer.
void neural_layer_dropout_update(const struct Layer *l)
Dummy function since a dropout layer has no weights.
size_t neural_layer_dropout_save(const struct Layer *l, FILE *fp)
Writes a dropout layer to a file.
void neural_layer_dropout_print(const struct Layer *l, const bool print_weights)
Prints a dropout layer.
void neural_layer_dropout_init(struct Layer *l, const struct ArgsLayer *args)
Initialises a dropout layer.
void neural_layer_dropout_free(const struct Layer *l)
Free memory used by a dropout layer.
void neural_layer_dropout_backward(const struct Layer *l, const struct Net *net, const double *input, double *delta)
Backward propagates a dropout layer.
size_t neural_layer_dropout_load(struct Layer *l, FILE *fp)
Reads a dropout layer from a file.
bool neural_layer_dropout_mutate(struct Layer *l)
Dummy function since a dropout layer cannot be mutated.
An implementation of a dropout layer.
Parameters for initialising a neural network layer.
double probability
Usage depends on layer implementation.
int channels
Pool, Conv, and Upsample.
int width
Pool, Conv, and Upsample.
int height
Pool, Conv, and Upsample.
int n_inputs
Number of inputs.
Neural network layer data structure.
double * output
Current neuron outputs (after activation function)
double * state
Current neuron states (before activation function)
int n_inputs
Number of layer inputs.
double scale
Usage depends on layer implementation.
struct LayerVtbl const * layer_vptr
Functions acting on layers.
int max_outputs
Maximum number of neurons in the layer.
double probability
Usage depends on layer implementation.
double * i
LSTM.
int n_outputs
Number of layer outputs.
int out_w
Pool, Conv, and Upsample.
int type
Layer type: CONNECTED, DROPOUT, etc.
int out_c
Pool, Conv, and Upsample.
double * delta
Delta for updating weights.
int out_h
Pool, Conv, and Upsample.
Neural network data structure.
Definition neural.h:48
bool train
Whether the network is in training mode.
Definition neural.h:55
double rand_uniform(const double min, const double max)
Returns a uniform random float [min,max].
Definition utils.c:62
Utility functions for random number handling, etc.