/* Layer type identifier (other type constants are defined elsewhere in the file). */
#define CONVOLUTIONAL (7) //!< Convolutional layer type

/* Human-readable layer type names, paired with the integer type constants
 * (see layer_type_as_string() / layer_type_as_int()). */
#define STRING_CONNECTED ("connected\0") //!< Connected
#define STRING_DROPOUT ("dropout\0") //!< Dropout
#define STRING_NOISE ("noise\0") //!< Noise
#define STRING_SOFTMAX ("softmax\0") //!< Softmax
#define STRING_RECURRENT ("recurrent\0") //!< Recurrent
#define STRING_LSTM ("lstm\0") //!< LSTM
#define STRING_MAXPOOL ("maxpool\0") //!< Maxpool
#define STRING_CONVOLUTIONAL ("convolutional\0") //!< Convolutional
#define STRING_AVGPOOL ("avgpool\0") //!< Avgpool
#define STRING_UPSAMPLE ("upsample\0") //!< Upsample

/* Bitwise layer option flags permitting evolution, SGD, etc.
 * (combined in the Layer options bitfield). */
#define LAYER_EVOLVE_WEIGHTS (1 << 0) //!< Layer may evolve its weights
#define LAYER_EVOLVE_NEURONS (1 << 1) //!< Layer may evolve its number of neurons
#define LAYER_EVOLVE_FUNCTIONS (1 << 2) //!< Layer may evolve its activation function
#define LAYER_SGD_WEIGHTS (1 << 3) //!< Layer may perform gradient descent
#define LAYER_EVOLVE_ETA (1 << 4) //!< Layer may evolve its gradient descent rate
#define LAYER_EVOLVE_CONNECT (1 << 5) //!< Layer may evolve its connectivity

/* Bounds: neuron states and weights/biases are clamped within these ranges,
 * and layer sizes are checked against the *_MAX guards. */
#define NEURON_MIN (-100) //!< Minimum neuron state
#define NEURON_MAX (100) //!< Maximum neuron state
#define WEIGHT_MIN (-10) //!< Minimum value of a weight or bias
#define WEIGHT_MAX (10) //!< Maximum value of a weight or bias
#define N_WEIGHTS_MAX (20000000) //!< Maximum number of weights per layer
#define N_INPUTS_MAX (2000000) //!< Maximum number of inputs to a layer
#define N_OUTPUTS_MAX (2000000) //!< Maximum number of outputs from a layer

/* Standard deviations for Gaussian weight initialisation and mutation. */
#define WEIGHT_SD_INIT (0.1) //!< Std dev of Gaussian weight initialisation
#define WEIGHT_SD (0.1) //!< Std dev of Gaussian weight mutation
#define WEIGHT_SD_RAND (1.0) //!< Std dev of Gaussian weight randomisation
147 struct Layer *(*layer_impl_copy)(
const struct Layer *src);
153 const double *input,
double *delta);
155 const double *input);
156 double *(*layer_impl_output)(
const struct Layer *l);
159 char *(*layer_impl_json_export)(
const struct Layer *l,
160 const bool return_weights);
168 static inline double *
195 const double *input,
double *
delta)
238 static inline struct Layer *
288 const double mu_disable);
355 static inline struct Layer *
375 size_t s = fwrite(&l->
type,
sizeof(
int), 1, fp);
390 size_t s = fread(&l->
type,
sizeof(
int), 1, fp);
An implementation of a multi-layer perceptron neural network.
void layer_guard_biases(const struct Layer *l)
Check number of biases is within bounds.
bool layer_mutate_connectivity(struct Layer *l, const double mu_enable, const double mu_disable)
Mutates a layer's connectivity by zeroing weights.
void layer_add_neurons(struct Layer *l, const int n)
Adds N neurons to a layer. Negative N removes neurons.
void layer_weight_print(const struct Layer *l, const bool print_weights)
Prints a layer's weights and biases.
const char * layer_type_as_string(const int type)
Returns a string representation of a layer type from an integer.
static void layer_rand(struct Layer *l)
Randomises a layer.
void layer_defaults(struct Layer *l)
Initialises a layer to default values.
static void layer_resize(struct Layer *l, const struct Layer *prev)
Resizes a layer using the previous layer's inputs.
static double * layer_output(const struct Layer *l)
Returns the outputs of a layer.
static size_t layer_save(const struct Layer *l, FILE *fp)
Writes the layer to a file.
static struct Layer * layer_init(const struct ArgsLayer *args)
Creates and initialises a new layer.
int layer_mutate_neurons(const struct Layer *l, const double mu)
Returns the number of neurons to add or remove from a layer.
static struct Layer * layer_copy(const struct Layer *src)
Creates and returns a copy of a specified layer.
bool layer_mutate_functions(struct Layer *l, const double mu)
Mutates a layer's activation function by random selection.
int layer_type_as_int(const char *type)
Returns the integer representation of a layer type given a name.
static void layer_free(const struct Layer *l)
Frees the memory used by the layer.
static void layer_backward(const struct Layer *l, const struct Net *net, const double *input, double *delta)
Backward propagates the error through a layer.
static size_t layer_load(struct Layer *l, FILE *fp)
Reads the layer from a file.
char * layer_weight_json(const struct Layer *l, const bool return_weights)
Returns a json formatted string representation of a layer's weights.
void layer_set_vptr(struct Layer *l)
Sets a neural network layer's functions to the implementations.
void layer_weight_clamp(const struct Layer *l)
Clamps a layer's weights and biases in range [WEIGHT_MIN, WEIGHT_MAX].
void layer_guard_outputs(const struct Layer *l)
Check number of outputs is within bounds.
void layer_weight_rand(struct Layer *l)
Randomises a layer's weights and biases.
void layer_ensure_input_represention(struct Layer *l)
Ensures that each neuron is connected to at least one input and each input is connected to at least one neuron.
static void layer_update(const struct Layer *l)
Updates the weights and biases of a layer.
static void layer_forward(const struct Layer *l, const struct Net *net, const double *input)
Forward propagates an input through the layer.
void layer_calc_n_active(struct Layer *l)
Recalculates the number of active connections within a layer.
static char * layer_json_export(const struct Layer *l, const bool return_weights)
Returns a json formatted string representation of a layer.
bool layer_receives_images(const int type)
Returns whether a layer type expects images as input.
static bool layer_mutate(struct Layer *l)
Performs layer mutation.
void layer_init_eta(struct Layer *l)
Initialises a layer's gradient descent rate.
bool layer_mutate_eta(struct Layer *l, const double mu)
Mutates the gradient descent rate of a neural layer.
static void layer_print(const struct Layer *l, const bool print_weights)
Prints the layer.
bool layer_mutate_weights(struct Layer *l, const double mu)
Mutates a layer's weights and biases by adding random numbers from a Gaussian normal distribution with zero mean and standard deviation equal to the mutation rate.
void layer_guard_weights(const struct Layer *l)
Check number of weights is within bounds.
Functions operating on neural network arguments/constants.
Parameters for initialising a neural network layer.
int type
Layer type: CONNECTED, DROPOUT, etc.
Neural network layer interface data structure.
void(* layer_impl_resize)(struct Layer *l, const struct Layer *prev)
void(* layer_impl_rand)(struct Layer *l)
void(* layer_impl_forward)(const struct Layer *l, const struct Net *net, const double *input)
char *(* layer_impl_json_export)(const struct Layer *l, const bool return_weights)
void(* layer_impl_free)(const struct Layer *l)
double *(* layer_impl_output)(const struct Layer *l)
bool(* layer_impl_mutate)(struct Layer *l)
struct Layer *(* layer_impl_copy)(const struct Layer *src)
void(* layer_impl_init)(struct Layer *l, const struct ArgsLayer *args)
size_t(* layer_impl_load)(struct Layer *l, FILE *fp)
size_t(* layer_impl_save)(const struct Layer *l, FILE *fp)
void(* layer_impl_print)(const struct Layer *l, const bool print_weights)
void(* layer_impl_backward)(const struct Layer *l, const struct Net *net, const double *input, double *delta)
void(* layer_impl_update)(const struct Layer *l)
Neural network layer data structure.
double * output
Current neuron outputs (after activation function)
double decay
Weight decay for gradient descent.
struct Layer * input_layer
Recursive layer input.
double * state
Current neuron states (before activation function)
int recurrent_function
Used by LSTM layers.
int max_neuron_grow
Maximum number of neurons to add per mutation event.
int stride
Pool, Conv, and Upsample.
int n_inputs
Number of layer inputs.
int n_biases
Number of layer biases.
bool * weight_active
Whether each connection is present in the layer.
double * weights
Weights for calculating neuron states.
double * weight_updates
Updates to weights.
double * mu
Mutation rates.
int channels
Pool, Conv, and Upsample.
double scale
Usage depends on layer implementation.
int height
Pool, Conv, and Upsample.
struct LayerVtbl const * layer_vptr
Functions acting on layers.
int max_outputs
Maximum number of neurons in the layer.
int width
Pool, Conv, and Upsample.
int n_weights
Number of layer weights.
double probability
Usage depends on layer implementation.
double * bias_updates
Updates to biases.
double eta_max
Maximum gradient descent rate.
struct Layer * output_layer
Recursive layer output.
int n_outputs
Number of layer outputs.
double * biases
Biases for calculating neuron states.
struct Layer * self_layer
Recursive layer self.
int n_active
Number of active weights / connections.
double * prev_state
Previous state for recursive layers.
int out_w
Pool, Conv, and Upsample.
int type
Layer type: CONNECTED, DROPOUT, etc.
int out_c
Pool, Conv, and Upsample.
double * delta
Delta for updating weights.
uint32_t options
Bitwise layer options permitting evolution, SGD, etc.
int out_h
Pool, Conv, and Upsample.
double eta_min
Minimum gradient descent rate.
double eta
Gradient descent rate.
double momentum
Momentum for gradient descent.
Neural network data structure.